diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile deleted file mode 100644 index 4234de160..000000000 --- a/.ci/Jenkinsfile +++ /dev/null @@ -1,81 +0,0 @@ -def golang = ['1.23', '1.24'] -def golangDefault = "golang:${golang.last()}" - -async { - - for (version in golang) { - def go = version - - task("test/go${go}") { - container("golang:${go}") { - sh 'make test' - } - } - - task("build/go${go}") { - container("golang:${go}") { - for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { - sh """ - make bin/frostfs-${app} - bin/frostfs-${app} --version - """ - } - } - } - } - - task('test/race') { - container(golangDefault) { - sh 'make test GOFLAGS="-count=1 -race"' - } - } - - task('lint') { - container(golangDefault) { - sh 'make lint-install lint' - } - } - - task('staticcheck') { - container(golangDefault) { - sh 'make staticcheck-install staticcheck-run' - } - } - - task('gopls') { - container(golangDefault) { - sh 'make gopls-install gopls-run' - } - } - - task('gofumpt') { - container(golangDefault) { - sh ''' - make fumpt-install - make fumpt - git diff --exit-code --quiet - ''' - } - } - - task('vulncheck') { - container(golangDefault) { - sh ''' - go install golang.org/x/vuln/cmd/govulncheck@latest - govulncheck ./... - ''' - } - } - - task('pre-commit') { - dockerfile(""" - FROM ${golangDefault} - RUN apt update && \ - apt install -y --no-install-recommends pre-commit - """) { - withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { - sh 'pre-commit run --color=always --hook-stage=manual --all-files' - } - } - } -} diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm deleted file mode 100644 index 42aeebc48..000000000 --- a/.docker/Dockerfile.adm +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang:1.23 AS builder -ARG BUILD=now -ARG VERSION=dev -ARG REPO=repository -WORKDIR /src -COPY . /src - -RUN make bin/frostfs-adm - -# Executable image -FROM alpine AS frostfs-adm -RUN apk add --no-cache bash - -WORKDIR / - -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=builder /src/bin/frostfs-adm /bin/frostfs-adm - -CMD ["frostfs-adm"] diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci deleted file mode 100644 index 9ddd8de59..000000000 --- a/.docker/Dockerfile.ci +++ /dev/null @@ -1,25 +0,0 @@ -FROM golang:1.23 - -WORKDIR /tmp - -# Install apt packages -RUN apt-get update && apt-get install --no-install-recommends -y \ - pip \ - && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \ - && rm -rf /var/lib/apt/lists/* - -# Dash → Bash -RUN echo "dash dash/sh boolean false" | debconf-set-selections -RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash - -RUN useradd -u 1234 -d /home/ci -m ci -USER ci - -ENV PATH="$PATH:/home/ci/.local/bin" - -COPY .pre-commit-config.yaml . - -RUN pip install "pre-commit==3.1.1" \ - && git init . \ - && pre-commit install-hooks \ - && rm -rf /tmp/* diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli deleted file mode 100644 index 16f130056..000000000 --- a/.docker/Dockerfile.cli +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang:1.23 AS builder -ARG BUILD=now -ARG VERSION=dev -ARG REPO=repository -WORKDIR /src -COPY . 
/src - -RUN make bin/frostfs-cli - -# Executable image -FROM alpine AS frostfs-cli -RUN apk add --no-cache bash - -WORKDIR / - -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=builder /src/bin/frostfs-cli /bin/frostfs-cli - -CMD ["frostfs-cli"] diff --git a/.docker/Dockerfile.dirty-adm b/.docker/Dockerfile.dirty-adm deleted file mode 100644 index 1515b9437..000000000 --- a/.docker/Dockerfile.dirty-adm +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine -RUN apk add --no-cache bash ca-certificates - -WORKDIR / - -COPY bin/frostfs-adm /bin/frostfs-adm - -CMD ["frostfs-adm"] diff --git a/.docker/Dockerfile.dirty-cli b/.docker/Dockerfile.dirty-cli deleted file mode 100644 index 7718a2ac5..000000000 --- a/.docker/Dockerfile.dirty-cli +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine -RUN apk add --no-cache bash ca-certificates - -WORKDIR / - -COPY bin/frostfs-cli /bin/frostfs-cli - -CMD ["frostfs-cli"] diff --git a/.docker/Dockerfile.dirty-ir b/.docker/Dockerfile.dirty-ir deleted file mode 100644 index 7e4e33fd7..000000000 --- a/.docker/Dockerfile.dirty-ir +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine -RUN apk add --no-cache bash ca-certificates - -WORKDIR / - -COPY bin/frostfs-ir /bin/frostfs-ir - -CMD ["frostfs-ir"] diff --git a/.docker/Dockerfile.dirty-storage b/.docker/Dockerfile.dirty-storage deleted file mode 100644 index 098712def..000000000 --- a/.docker/Dockerfile.dirty-storage +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine -RUN apk add --no-cache bash ca-certificates - -WORKDIR / - -COPY bin/frostfs-node /bin/frostfs-node - -CMD ["frostfs-node"] diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir deleted file mode 100644 index c119f8127..000000000 --- a/.docker/Dockerfile.ir +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.23 AS builder -ARG BUILD=now -ARG VERSION=dev -ARG REPO=repository -WORKDIR /src -COPY . /src - -RUN make bin/frostfs-ir - -# Executable image -FROM alpine AS frostfs-ir -RUN apk add --no-cache bash - -WORKDIR / - -COPY --from=builder /src/bin/frostfs-ir /bin/frostfs-ir - -CMD ["frostfs-ir"] diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage deleted file mode 100644 index 854f7adea..000000000 --- a/.docker/Dockerfile.storage +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.23 AS builder -ARG BUILD=now -ARG VERSION=dev -ARG REPO=repository -WORKDIR /src -COPY . /src - -RUN make bin/frostfs-node - -# Executable image -FROM alpine AS frostfs-node -RUN apk add --no-cache bash - -WORKDIR / - -COPY --from=builder /src/bin/frostfs-node /bin/frostfs-node - -CMD ["frostfs-node"] diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 6f1699bf0..000000000 --- a/.dockerignore +++ /dev/null @@ -1,9 +0,0 @@ -.idea -.vscode -.git -docker-compose.yml -Dockerfile -temp -.dockerignore -docker -.cache diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index fb169997c..000000000 --- a/.forgejo/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: community, triage, bug -assignees: '' - ---- - - - -## Expected Behavior - - -## Current Behavior - - -## Possible Solution - - -## Steps to Reproduce (for bugs) - - -1. 
- -## Context - -## Regression - -## Your Environment - -* Version used: -* Server setup and configuration: -* Operating System and version (`uname -a`): diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 3ba13e0ce..000000000 --- a/.forgejo/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1 +0,0 @@ -blank_issues_enabled: false diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 5beeb0641..000000000 --- a/.forgejo/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: community, triage -assignees: '' - ---- - -## Is your feature request related to a problem? Please describe. - - -## Describe the solution you'd like - - -## Describe alternatives you've considered - - -## Additional context - - -## Don't forget to add labels! -- component label (`neofs-adm`, `neofs-storage`, ...) -- issue type (`enhancement`, `refactor`, ...) -- `goodfirstissue`, `helpwanted` if needed -- does this issue belong to an epic? -- priority (`P0`-`P4`) if already triaged -- quarter label (`202XQY`) if possible diff --git a/.forgejo/logo.svg b/.forgejo/logo.svg deleted file mode 100644 index 148c359d5..000000000 --- a/.forgejo/logo.svg +++ /dev/null @@ -1,70 +0,0 @@ -[logo.svg: 70 lines of SVG markup omitted] diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml deleted file mode 100644 index d568b9607..000000000 --- a/.forgejo/workflows/build.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build - -on: - pull_request: - push: - branches: - - master - -jobs: - build: - name: Build Components - runs-on: ubuntu-latest - strategy: - matrix: - go_versions: [ '1.23', '1.24' ] - - steps: - - uses: actions/checkout@v3 - with: - # Allows to fetch all history for all branches and tags. - # Need this for proper versioning.
- fetch-depth: 0 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '${{ matrix.go_versions }}' - - - name: Build CLI - run: make bin/frostfs-cli - - run: bin/frostfs-cli --version - - - name: Build NODE - run: make bin/frostfs-node - - - name: Build IR - run: make bin/frostfs-ir - - - name: Build ADM - run: make bin/frostfs-adm - - run: bin/frostfs-adm --version - - - name: Build LENS - run: make bin/frostfs-lens - - run: bin/frostfs-lens --version diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml deleted file mode 100644 index 190d7764a..000000000 --- a/.forgejo/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO action -on: [pull_request] - -jobs: - dco: - name: DCO - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - - - name: Run commit format checker - uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 - with: - from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml deleted file mode 100644 index fe91d65f9..000000000 --- a/.forgejo/workflows/oci-image.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: OCI image - -on: - push: - workflow_dispatch: - -jobs: - image: - name: Build container images - runs-on: docker - container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm - steps: - - name: Clone git repo - uses: actions/checkout@v3 - - - name: Build OCI image - run: make images - - - name: Push image to OCI registry - run: | - echo "$REGISTRY_PASSWORD" \ - | docker login --username truecloudlab --password-stdin git.frostfs.info - make push-images - if: >- - startsWith(github.ref, 'refs/tags/v') && - (github.event_name == 'workflow_dispatch' || github.event_name == 'push') - env: - REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml deleted file mode 100644 index c2e293175..000000000 --- a/.forgejo/workflows/pre-commit.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: Pre-commit hooks - -on: - pull_request: - push: - branches: - - master - -jobs: - precommit: - name: Pre-commit - env: - # Skip pre-commit hooks which are executed by other actions. - SKIP: make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt - runs-on: ubuntu-22.04 - # If we use actions/setup-python from either Github or Gitea, - # the line above fails with a cryptic error about not being able to find python. - # So install everything manually. 
- steps: - - uses: actions/checkout@v3 - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: 1.24 - - name: Set up Python - run: | - apt update - apt install -y pre-commit - - name: Run pre-commit - run: pre-commit run --color=always --hook-stage manual --all-files diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml deleted file mode 100644 index f3f5432ce..000000000 --- a/.forgejo/workflows/tests.yml +++ /dev/null @@ -1,116 +0,0 @@ -name: Tests and linters - -on: - pull_request: - push: - branches: - - master - -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - cache: true - - - name: Install linters - run: make lint-install - - - name: Run linters - run: make lint - - tests: - name: Tests - runs-on: ubuntu-latest - strategy: - matrix: - go_versions: [ '1.23', '1.24' ] - fail-fast: false - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '${{ matrix.go_versions }}' - cache: true - - - name: Run tests - run: make test - - tests-race: - name: Tests with -race - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - cache: true - - - name: Run tests - run: go test ./... -count=1 -race - - staticcheck: - name: Staticcheck - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - cache: true - - - name: Install staticcheck - run: make staticcheck-install - - - name: Run staticcheck - run: make staticcheck-run - - gopls: - name: gopls check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.22' - cache: true - - - name: Install gopls - run: make gopls-install - - - name: Run gopls - run: make gopls-run - - fumpt: - name: Run gofumpt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - cache: true - - - name: Install gofumpt - run: make fumpt-install - - - name: Run gofumpt - run: | - make fumpt - git diff --exit-code --quiet diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml deleted file mode 100644 index bc94792d8..000000000 --- a/.forgejo/workflows/vulncheck.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Vulncheck - -on: - pull_request: - push: - branches: - - master - -jobs: - vulncheck: - name: Vulncheck - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - check-latest: true - - - name: Install govulncheck - run: go install golang.org/x/vuln/cmd/govulncheck@latest - - - name: Run govulncheck - run: govulncheck ./... 
diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index aa9391657..000000000 --- a/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -/**/*.pb.go -diff -merge -/**/*.pb.go linguist-generated=true -/go.sum -diff diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 42b7679cd..000000000 --- a/.gitignore +++ /dev/null @@ -1,51 +0,0 @@ -# IDE -.idea -.vscode - -# Vendoring -vendor - -# tempfiles -.DS_Store -*~ -.cache - -temp -tmp - -# binary -bin/ -release/ - -# coverage -coverage.txt -coverage.html - -# testing -cmd/test -/plugins/ -testfile - -# misc -.neofs-cli.yml - -# debhelpers -debian/*debhelper* - -# logfiles -debian/*.log - -# .substvars -debian/*.substvars - -# .bash-completion -debian/*.bash-completion - -# Install folders and files -debian/frostfs-cli/ -debian/frostfs-ir/ -debian/files -debian/frostfs-storage/ -debian/changelog -man/ -debs/ diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index e3ec09f60..000000000 --- a/.golangci.yml +++ /dev/null @@ -1,107 +0,0 @@ -version: "2" -run: - tests: false -output: - formats: - tab: - path: stdout - colors: false -linters: - default: none - enable: - - bidichk - - containedctx - - contextcheck - - copyloopvar - - durationcheck - - errcheck - - exhaustive - - funlen - - gocognit - - gocritic - - godot - - importas - - ineffassign - - intrange - - misspell - - perfsprint - - predeclared - - protogetter - - reassign - - revive - - staticcheck - - testifylint - - truecloudlab-linters - - unconvert - - unparam - - unused - - usetesting - - whitespace - settings: - exhaustive: - default-signifies-exhaustive: true - funlen: - lines: 80 - statements: 60 - gocognit: - min-complexity: 40 - gocritic: - disabled-checks: - - ifElseChain - importas: - alias: - - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - no-unaliased: true - no-extra-aliases: false - staticcheck: - checks: - - all - - -QF1002 - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs - disable-packages: - - codes - - err - - res - - exec - target-methods: - - reportFlushError - - reportError - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gci - - gofmt - - goimports - settings: - gci: - sections: - - standard - - default - custom-order: true - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index d2d90fa5c..000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,56 +0,0 @@ -ci: - autofix_prs: false - -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 - hooks: - - id: check-added-large-files - - id: check-case-conflict - - id: check-executables-have-shebangs - - id: check-shebang-scripts-are-executable - - id: check-merge-conflict - - id: check-json - - id: check-xml - - id: check-yaml - - id: trailing-whitespace - args: [--markdown-linebreak-ext=md] - - id: end-of-file-fixer - exclude: "(.key|.svg)$" - - - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.9.0.6 - hooks: - - id: shellcheck - - - repo: local - 
hooks: - - id: make-lint - name: Run Make Lint - entry: make lint - language: system - pass_filenames: false - - - repo: local - hooks: - - id: go-unit-tests - name: go unit tests - entry: make test GOFLAGS='' - pass_filenames: false - types: [go] - language: system - - - repo: local - hooks: - - id: gofumpt - name: gofumpt - entry: make fumpt - pass_filenames: false - types: [go] - language: system - - - repo: https://github.com/TekWizely/pre-commit-golang - rev: v1.0.0-rc.1 - hooks: - - id: go-staticcheck-repo-mod - - id: go-mod-tidy diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 92c84ab16..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,258 +0,0 @@ -# Changelog -Changelog for FrostFS Node - -## [Unreleased] - -### Added -### Changed -### Fixed -### Removed -### Updated - -## [v0.44.0] - 2024-25-11 - Rongbuk - -### Added -- Allow to prioritize nodes during GET traversal via attributes (#1439) -- Add metrics for the frostfsid cache (#1464) -- Customize constant attributes attached to every tracing span (#1488) -- Manage additional keys in the `frostfsid` contract (#1505) -- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519) - -### Changed -- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396) -- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515) - -### Fixed -- Fix EC object search (#1408) -- Fix EC object put when one of the nodes is unavailable (#1427) - -### Removed -- Drop most of the eACL-related code (#1425) -- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483) - -### Upgrading from v0.43.0 -The metabase schema has changed completely, resync is required. - -## [v0.42.0] - -### Added -- Add audit logs for gRPC requests (#1184) -- Add CLI command to convert eACL to APE (#1189) -- Add `--await` flag to `control set-status` (#60) -- `app_info` metric for binary version (#1154) -- `--quiet` flag for healthcheck command (#1209) - -### Changed -- Deprecate Container.SetEACL RPC (#1219) - -### Fixed -- Take groups into account during APE processing (#1190) -- Handle double SIGHUP correctly (#1145) -- Handle empty filenames in tree listing (#1074) -- Handle duplicate tree nodes in the split-brain scenario (#1234, #1251) -- Remove APE pre-check in Object.GET/HEAD/RANGE RPC (#1249) -- Delete EC gc marks and split info (#1257) -- Do not search for non-existent objects on deletion (#1261) - -### Updated -- Make putting EC chunks more robust (#1233) - -## [v0.41.0] - -### Added -- Support mTLS for morph client (#1170) - -### Fixed -- Update shard state metric during shard init (#1174) -- Handle ENOSPC in blobovnicza (#1166) -- Handle multiple split-infos for EC objects (#1163) -- Set `Disabled` mode as the default for components (#1168) - -## [v0.40.0] - -### Added -- Support EC chunk reconstruction in policer (#1129) -- Support LOCK, DELETE and SEARCH methods on EC objects (#1147, 1144) -- apemanager service to manage APE chains (#1105) - -### Fixed -- Properly verify GetRangeHash response (#1083) -- Send `MONOTONIC_USEC` in sdnotify on reload (#1135) - -### Updated -- neo-go to `v0.106.0` - -## [v0.39.0] - -### Added -- Preliminary erasure coding support (#1065, #1112, #1103, #1120) -- TTL cache for blobovnicza tree (#1004) -- Cache for frostfsid and policy contracts (#1117) -- Writecache path to metric labels (#966) -- Documentation for authentication mechanisms (#1097, #1104) -- Metrics for metabase resync status (#1029) - -### Changed -- Speed 
- Speed up metabase resync (#1024) - -### Fixed -- Possible panic in GET_RANGE (#1077) - -### Updated -- Minimum required Go version to 1.21 - -## [v0.38.0] - -### Added -- Add `trace_id` to logs in `frostfs-node` (#146) -- Allow to forcefully remove container from IR (#733) -- LOKI support (#740) -- Allow sealing writecache (#569) -- Support tree service in data evacuation (#947) -- Use new policy engine mechanism for access control (#770, #804) -- Log about active notary deposit waiting (#963) - -### Changed -- Sort output in `frostfs-cli` subcommands (#333) -- Send bootstrap query at each epoch tick (#721) -- Do not retain garbage in fstree on systems supporting O_TMPFILE (#970) - -### Fixed -- Handle synchronization failures better in tree service (#741) -- Fix invalid batch size for iterator traversal in morph (#1000) - -### Updated -- `neo-go` to `v0.105.0` - -## [v0.37.0] - -### Added -- Support impersonate bearer token (#229) -- Change log level on SIGHUP for ir (#125) -- Reload pprof and metrics on SIGHUP for ir (#125) -- Support copies number parameter in `frostfs-cli object put` (#351) -- Set extra wallets on SIGHUP for ir (#125) -- Writecache metrics (#312) -- Add tree service metrics (#370) - -### Changed -- `frostfs-cli util locode generate` is now much faster (#309) -### Fixed -- Take network settings into account during netmap contract update (#100) -- Read config files from dir even if config file not provided via `--config` for node (#238) -- Notary requests parsing according to `neo-go`'s updates (#268) -- Tree service panic in its internal client cache (#322) -- Iterate over endpoints when creating ws client in morph's constructor (#304) -- Delete complex objects with GC (#332) - -### Removed -### Updated -- `neo-go` to `v0.101.1` -- `google.golang.org/grpc` to `v1.55.0` -- `paulmach/orb` to `v0.9.2` -- `go.etcd.io/bbolt` to `v1.3.7` -- `github.com/nats-io/nats.go` to `v1.25.0` -- `golang.org/x/sync` to `v0.2.0` -- `golang.org/x/term` to `v0.8.0` -- `github.com/spf13/cobra` to `v1.7.0` -- `github.com/panjf2000/ants/v2` to `v2.7.4` -- `github.com/multiformats/go-multiaddr` to `v0.9.0` -- `github.com/hashicorp/golang-lru/v2` to `v2.0.2` -- `go.uber.org/atomic` to `v1.11.0` -- Minimum go version to v1.20 -- `github.com/prometheus/client_golang` to `v1.15.1` -- `github.com/prometheus/client_model` to `v0.4.0` -- `go.opentelemetry.io/otel` to `v1.15.1` -- `go.opentelemetry.io/otel/trace` to `v1.15.1` -- `github.com/spf13/cast` to `v1.5.1` -- `git.frostfs.info/TrueCloudLab/hrw` to `v1.2.1` - -### Updating from v0.36.0 - -## [v0.36.0] - 2023-04-12 - Furtwängler - -### Added -- Add GAS pouring mechanism for a configurable list of wallets (#128) -- Separate batching for replicated operations over the same container in pilorama (#1621) -- Doc for extended headers (#2128) -- New `frostfs_node_object_container_size` metric for tracking size of regular objects in a container (#2116) -- New `frostfs_node_object_payload_size` metric for tracking size of regular objects on a single shard (#1794) -- Add command `frostfs-adm morph netmap-candidates` (#1889) -- `object.delete.tombstone_lifetime` config parameter to set tombstone lifetime in the DELETE service (#2246) -- Reload config for pprof and metrics on SIGHUP in `neofs-node` (#1868) -- Multiple configs support (#44) -- Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37) -- Tree service now saves the last synchronization height which persists across restarts (#82) -- Add tracing support (#135)
- Multiple (and a fix for single) copies number support for `PUT` requests (#221) - -### Changed -- Change `frostfs_node_engine_container_size` to count sizes of logical objects -- `common.PrintVerbose` prints via `cobra.Command.Printf` (#1962) -- Env prefix in configuration changed to `FROSTFS_*` (#43) -- Link object is broadcast throughout the whole container now (#57) -- Pilorama now can merge multiple batches into one (#2231) -- Storage engine now can start even when some shard components are unavailable (#2238) -- `neofs-cli` buffer for object put increased from 4 KiB to 3 MiB (#2243) -- Expired locked object is available for reading (#56) -- Initialize write-cache asynchronously (#32) -- Update system attribute names (#159) - -### Fixed -- Increase payload size metric on shards' `put` operation (#1794) -- Big object removal with non-local parts (#1978) -- Disable pilorama when moving to degraded mode (#2197) -- Fetching blobovnicza objects that are not found in write-cache (#2206) -- Do not search for the small objects in FSTree (#2206) -- Correct status error for expired session token (#2207) -- Set flag `mode` required for `frostfs-cli control shards set-mode` (#8) -- Fix `dirty` suffix in debian package version (#53) -- Prevent node process from being killed by systemd when shutting down (#1465) -- Restore subscriptions correctly on morph client switch (#2212) -- Expired objects could be returned if not marked with GC yet (#2213) -- `neofs-adm morph dump-hashes` now properly iterates over custom domain (#2224) -- Possible deadlock in write-cache (#2239) -- Fix `*_req_count` and `*_req_count_success` metric values (#2241) -- Storage ID update by write-cache (#2244) -- `neo-go` client deadlock on subscription (#2244, #2272) -- Possible panic during write-cache initialization (#2234) -- Do not fetch an object if `meta` is missing it (#61) -- Create contract wallet only by `init` and `update-config` command (#63) -- Actually use `object.put.pool_size_local` and independent pool for local puts (#64) -- Pretty printer of basic ACL in the NeoFS CLI (#2259) -- Adding of public key for nns group `group.frostfs` at init step (#130) -- Iterating over just removed files by FSTree (#98) -- Parts of a locked object could not be removed anymore (#141) -- Non-alphabet nodes do not try to handle alphabet events (#181) -- Failing SN and IR transactions because of incorrect scopes (#2230, #2263) -- Global scope used for some transactions (#2230, #2263) -- Concurrent morph cache misses (#30) - -### Removed -### Updated -- `neo-go` to `v0.100.1` -- `github.com/klauspost/compress` to `v1.15.13` -- `github.com/multiformats/go-multiaddr` to `v0.8.0` -- `golang.org/x/term` to `v0.3.0` -- `google.golang.org/grpc` to `v1.52.0` -- `github.com/spf13/viper` to `v1.15.0` -- `github.com/nats-io/nats.go` to `v1.22.1` -- `github.com/TrueCloudLab/hrw` to `v1.1.1` -- Minimum go version to v1.18 - -### Updating from v0.35.0 (old NeoFS) - -You need to change configuration environment variables to `FROSTFS_*` if you use any. - -New config field `object.delete.tombstone_lifetime` allows to set tombstone lifetime -more appropriate for a specific deployment. - -Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__` -(existing objects with old attributes will be treated as before, but new objects will use the new attributes). - -## Older versions - -This project is a fork of [NeoFS](https://github.com/nspcc-dev/neofs-node) from version v0.35.0.
-To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-node/blob/master/CHANGELOG.md. - -[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-node/compare/98e48b68514127afc291b8a8ff6b12838ed1cb5c...master diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index d19c96a5c..000000000 --- a/CODEOWNERS +++ /dev/null @@ -1,3 +0,0 @@ -.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers -.forgejo/.* @potyarkin -Makefile @potyarkin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 53ff7c8df..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,156 +0,0 @@ -# Contribution guide - -First, thank you for contributing! We love and encourage pull requests from -everyone. Please follow the guidelines: - -- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-node/issues) and - [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-node/pulls) for existing - discussions. - -- Open an issue first, to discuss a new feature or enhancement. - -- Write tests, and make sure the test suite passes locally and on CI. - -- Open a pull request, and reference the relevant issue(s). - -- Make sure your commits are logically separated and have good comments - explaining the details of your change. - -- After receiving feedback, amend your commits or add new ones as - appropriate. - -- **Have fun!** - -## Development Workflow - -Start by forking the `frostfs-node` repository, make changes in a branch and then -send a pull request. We encourage pull requests to discuss code changes. Here -are the steps in detail: - -### Set up your Forgejo repository -Fork [FrostFS node upstream](https://git.frostfs.info/TrueCloudLab/frostfs-node) source -repository to your own personal repository. Copy the URL of your fork (you will -need it for the `git clone` command below). - -```sh -$ git clone https://git.frostfs.info/TrueCloudLab/frostfs-node -``` - -### Set up git remote as ``upstream`` -```sh -$ cd frostfs-node -$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-node -$ git fetch upstream -$ git merge upstream/master -... -``` - -### Create your feature branch -Before making code changes, make sure you create a separate branch for these -changes. Maybe you will find it convenient to name the branch in -`<type>/<issue>-<description>` format. - -``` -$ git checkout -b feature/123-something_awesome -``` - -### Test your changes -After your code changes, make sure - -- To add test cases for the new code. -- To run `make lint` and `make staticcheck-run` -- To squash your commits into a single commit or a series of logically separated - commits run `git rebase -i`. It's okay to force update your pull request. -- To check that `make test` and `make all` complete. - -### Commit changes -After verification, commit your changes. This is a [great -post](https://chris.beams.io/posts/git-commit/) on how to write useful commit -messages. Try following this template: - -``` -[#Issue] Summary - -Description - - - - -``` - -``` -$ git commit -sam '[#123] Add some feature' -``` - -### Push to the branch -Push your locally committed changes to the remote origin (your fork) -``` -$ git push origin feature/123-something_awesome -``` - -### Create a Pull Request -Pull requests can be created via Forgejo. Refer to [this -document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for -detailed steps on how to create a pull request. After a Pull Request gets peer -reviewed and approved, it will be merged.
- -## DCO Sign off - -All authors to the project retain copyright to their work. However, to ensure -that they are only submitting work that they have rights to, we are requiring -everyone to acknowledge this by signing their work. - -Any copyright notices in this repository should specify the authors as "the -contributors". - -To sign your work, just add a line like this at the end of your commit message: - -``` -Signed-off-by: Samii Sakisaka - -``` - -This can easily be done with the `--signoff` option to `git commit`. - -By doing this you state that you can certify the following (from [The Developer -Certificate of Origin](https://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` diff --git a/CREDITS.md b/CREDITS.md deleted file mode 100644 index ce4050b71..000000000 --- a/CREDITS.md +++ /dev/null @@ -1,33 +0,0 @@ -# Credits - -FrostFS continues the development of NeoFS. - -Initial NeoFS research and development (2018-2020) was done by -[NeoSPCC](https://nspcc.ru) team. - -In alphabetical order: - -- Alexey Vanin -- Anastasia Prasolova -- Anatoly Bogatyrev -- Evgeny Kulikov -- Evgeny Stratonikov -- Leonard Liubich -- Sergei Liubich -- Stanislav Bogatyrev - -# Contributors - -In chronological order: -- Pavel Karpy -- Zhang Tao -- Angira Kekteeva -- Sergio Nemirowski -- Tivizi Jing - -# Special Thanks - -For product development support: - -- Fabian Wahle -- Neo Global Development diff --git a/LICENSE b/LICENSE deleted file mode 100644 index f288702d2..000000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. 
Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. 
-Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). 
- - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". 
- - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. 
- - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/philosophy/why-not-lgpl.html>.
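The notice template above is meant to sit at the top of each source file. As a concrete illustration, here is a minimal sketch of such a header in a Go file; the program name, year, and author are placeholders invented for this example, not values taken from this repository:

```go
// frostfs-example: a one-line idea of what the program does.
// Copyright (C) 2024  Example Author
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
package main
```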
diff --git a/Makefile b/Makefile
deleted file mode 100755
index 575eaae6f..000000000
--- a/Makefile
+++ /dev/null
@@ -1,322 +0,0 @@
-#!/usr/bin/make -f
-SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
-
-REPO ?= $(shell go list -m)
-VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-
-HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
-HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-
-GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
-PROTOC_VERSION ?= 25.0
-PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
-PROTOC_OS_VERSION=osx-x86_64
-ifeq ($(shell uname), Linux)
-  PROTOC_OS_VERSION=linux-x86_64
-endif
-STATICCHECK_VERSION ?= 2025.1.1
-ARCH = amd64
-
-BIN = bin
-RELEASE = release
-DIRS = $(BIN) $(RELEASE)
-
-# List of binaries to build.
-CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
-BINS = $(addprefix $(BIN)/, $(CMDS))
-
-OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
-LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
-TMP_DIR := .cache
-PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
-PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
-PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
-STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
-STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
-
-SOURCES = $(shell find . -type f -name "*.go" -print)
-
-GOFUMPT_VERSION ?= v0.7.0
-GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
-GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
-
-GOPLS_VERSION ?= v0.17.1
-GOPLS_DIR ?= $(abspath $(BIN))/gopls
-GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
-GOPLS_TEMP_FILE := $(shell mktemp)
-
-FROSTFS_CONTRACTS_PATH=$(abspath ./../frostfs-contract)
-LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
-LOCODE_DB_VERSION=v0.4.0
-
-.PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
-	prepare-release pre-commit unpre-commit
-
-# To build a specific binary, use its name prefix with bin/ as a target
-# For example `make bin/frostfs-node` will build only the storage node binary
-# Just `make` will build all possible binaries
-all: $(DIRS) $(BINS)
-
-# help target
-include help.mk
-
-$(BINS): $(DIRS) dep
-	@echo "⇒ Build $@"
-	CGO_ENABLED=0 \
-	go build -v -trimpath \
-	-ldflags "-X $(REPO)/misc.Version=$(VERSION)" \
-	-o $@ ./cmd/$(notdir $@)
-
-$(DIRS):
-	@echo "⇒ Ensure dir: $@"
-	@mkdir -p $@
-
-# Prepare binaries and archives for release
-.ONESHELL:
-prepare-release: docker/all
-	@for file in `ls -1 $(BIN)/frostfs-*`; do
-		cp $$file $(RELEASE)/`basename $$file`-$(ARCH)
-		strip $(RELEASE)/`basename $$file`-$(ARCH)
-		tar -czf $(RELEASE)/`basename $$file`-$(ARCH).tar.gz $(RELEASE)/`basename $$file`-$(ARCH)
-	done
-
-# Pull go dependencies
-dep:
-	@printf "⇒ Download requirements: "
-	CGO_ENABLED=0 \
-	go mod download && echo OK
-	@printf "⇒ Tidy requirements : "
-	CGO_ENABLED=0 \
-	go mod tidy -v && echo OK
-
-# Build export-metrics
-export-metrics: dep
-	@printf "⇒ Build export-metrics\n"
-	CGO_ENABLED=0 \
-	go build -v -trimpath -o bin/export-metrics ./scripts/export-metrics
-
-# Regenerate proto files:
-protoc:
-	@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
-		make protoc-install; \
-	fi
-	@for f in `find . 
-type f -name '*.proto' -not -path './bin/*'`; do \
-		echo "⇒ Processing $$f "; \
-		$(PROTOC_DIR)/bin/protoc \
-			--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
-			--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
-			--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
-			--go-grpc_opt=require_unimplemented_servers=false \
-			--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
-	done
-
-# Install protoc
-protoc-install:
-	@rm -rf $(PROTOBUF_DIR)
-	@mkdir -p $(PROTOBUF_DIR)
-	@echo "⇒ Installing protoc... "
-	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
-	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
-	@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
-	@echo "⇒ Installing protogen FrostFS plugin..."
-	@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
-
-# Build FrostFS component's docker image
-image-%:
-	@echo "⇒ Build FrostFS $* docker image "
-	@docker build \
-		--build-arg REPO=$(REPO) \
-		--build-arg VERSION=$(VERSION) \
-		--rm \
-		-f .docker/Dockerfile.$* \
-		-t $(HUB_IMAGE)-$*:$(HUB_TAG) .
-
-# Build all Docker images
-images: image-storage image-ir image-cli image-adm
-
-# Build dirty local Docker images
-dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
-
-# Push FrostFS components' docker image to the registry
-push-image-%:
-	@echo "⇒ Publish FrostFS $* docker image "
-	@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
-# Run `make %` in Golang container
-docker/%:
-	docker run --rm -t \
-	-v `pwd`:/src \
-	-w /src \
-	-u "$$(id -u):$$(id -g)" \
-	--env HOME=/src \
-	golang:$(GO_VERSION) make $*
-
-
-# Run all code formatters
-fmts: fumpt imports
-
-# Reformat imports
-imports:
-	@echo "⇒ Processing goimports check"
-	@goimports -w cmd/ pkg/ misc/
-
-# Install gofumpt
-fumpt-install:
-	@rm -rf $(GOFUMPT_DIR)
-	@mkdir -p $(GOFUMPT_DIR)
-	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
-
-# Run gofumpt
-fumpt:
-	@if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \
-		make fumpt-install; \
-	fi
-	@echo "⇒ Processing gofumpt check"
-	$(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/
-
-# Run unit tests with go test
-test: GOFLAGS ?= "-count=1"
-test:
-	@echo "⇒ Running go test"
-	@GOFLAGS="$(GOFLAGS)" go test ./...
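A note on the `$(BINS)` rule earlier in this Makefile: it injects the build version through `-ldflags "-X $(REPO)/misc.Version=$(VERSION)"`. For readers unfamiliar with the pattern, the following is a minimal sketch of the kind of package such a flag targets; the actual `misc` package is not shown in this diff, so its default value here is an assumption:

```go
// Package misc holds build-time metadata.
package misc

// Version is the fallback value; the linker overwrites it at build time, e.g.:
//
//	go build -ldflags "-X <module path>/misc.Version=v0.44.0" ./cmd/frostfs-node
var Version = "dev" // assumed default for this sketch
```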
- -# Install Gerrit commit-msg hook -review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks -review-install: - @git config remote.review.url \ - || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node - @mkdir -p $(GIT_HOOK_DIR)/ - @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg - @chmod +x $(GIT_HOOK_DIR)/commit-msg - @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg - @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg - -# Create a PR in Gerrit -review: BRANCH ?= master -review: - @git push review HEAD:refs/for/$(BRANCH) \ - --push-option r=e.stratonikov@yadro.com \ - --push-option r=d.stepanov@yadro.com \ - --push-option r=an.nikiforov@yadro.com \ - --push-option r=a.arifullin@yadro.com \ - --push-option r=ekaterina.lebedeva@yadro.com \ - --push-option r=a.savchuk@yadro.com \ - --push-option r=a.chuprov@yadro.com - -# Run pre-commit -pre-commit-run: - @pre-commit run -a --hook-stage manual - -# Install linters -lint-install: $(BIN) - @rm -rf $(OUTPUT_LINT_DIR) - @mkdir -p $(OUTPUT_LINT_DIR) - @mkdir -p $(TMP_DIR) - @rm -rf $(TMP_DIR)/linters - @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters - @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) - @rm -rf $(TMP_DIR)/linters - @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) - -# Run linters -lint: - @if [ ! -d "$(LINT_DIR)" ]; then \ - make lint-install; \ - fi - $(LINT_DIR)/golangci-lint run - -# Install staticcheck -staticcheck-install: - @rm -rf $(STATICCHECK_DIR) - @mkdir -p $(STATICCHECK_DIR) - @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) - -# Run staticcheck -staticcheck-run: - @if [ ! -d "$(STATICCHECK_VERSION_DIR)" ]; then \ - make staticcheck-install; \ - fi - @$(STATICCHECK_VERSION_DIR)/staticcheck ./... - -# Install gopls -gopls-install: - @rm -rf $(GOPLS_DIR) - @mkdir -p $(GOPLS_DIR) - @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) - -# Run gopls -gopls-run: - @if [ ! -d "$(GOPLS_VERSION_DIR)" ]; then \ - make gopls-install; \ - fi - $(GOPLS_VERSION_DIR)/gopls check $(SOURCES) 2>&1 >$(GOPLS_TEMP_FILE) - @if [[ $$(wc -l < $(GOPLS_TEMP_FILE)) -ne 0 ]]; then \ - cat $(GOPLS_TEMP_FILE); \ - exit 1; \ - fi - rm $(GOPLS_TEMP_FILE) - -# Run linters in Docker -docker/lint: - docker run --rm -t \ - -v `pwd`:/src \ - -u `stat -c "%u:%g" .` \ - --env HOME=/src \ - golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint' - -# Activate pre-commit hooks -pre-commit: - pre-commit install -t pre-commit -t commit-msg - -# Deactivate pre-commit hooks -unpre-commit: - pre-commit uninstall -t pre-commit -t commit-msg - -# Print version -version: - @echo $(VERSION) - -# Delete built artifacts -clean: - rm -rf .cache - rm -rf $(BIN) - rm -rf $(RELEASE) - -# Download locode database -locode-download: - mkdir -p $(TMP_DIR) - @wget -q -O ./$(TMP_DIR)/locode_db.gz 'https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/download/${LOCODE_DB_VERSION}/locode_db.gz' - gzip -dfk ./$(TMP_DIR)/locode_db.gz - -# Start dev environment -env-up: all - docker compose -f dev/docker-compose.yml up -d - @if [ ! 
-d "$(FROSTFS_CONTRACTS_PATH)" ]; then \ - echo "Frostfs contracts not found"; exit 1; \ - fi - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH} - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \ - --storage-wallet ./dev/storage/wallet01.json \ - --storage-wallet ./dev/storage/wallet02.json \ - --storage-wallet ./dev/storage/wallet03.json \ - --storage-wallet ./dev/storage/wallet04.json - - @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \ - make locode-download; \ - fi - mkdir -p ./$(TMP_DIR)/state - mkdir -p ./$(TMP_DIR)/storage - -# Shutdown dev environment -env-down: - docker compose -f dev/docker-compose.yml down -v - rm -rf ./$(TMP_DIR)/state - rm -rf ./$(TMP_DIR)/storage diff --git a/README.md b/README.md index 0109ed0e5..7463f9e17 100644 --- a/README.md +++ b/README.md @@ -1,134 +1,3 @@ -

- FrostFS -

+# WIP area: this repo is just a fork! -

- FrostFS is a decentralized distributed object storage integrated with the NEO Blockchain. -

-
----
-[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
-![Release (latest)](https://git.frostfs.info/TrueCloudLab/frostfs-node/badges/release.svg)
-
-# Overview
-
-FrostFS Nodes are organized in a peer-to-peer network that takes care of storing
-and distributing user's data. Any Neo user may participate in the network and
-get paid for providing storage resources to other users or store their data in
-FrostFS and pay a competitive price for it.
-
-Users can reliably store object data in the FrostFS network and have a transparent
-data placement process due to a decentralized architecture and flexible storage
-policies. Each node is responsible for executing the storage policies that the
-users select for geographical location, reliability level, number of nodes, type
-of disks, capacity, etc. Thus, FrostFS gives full control over data to users.
-
-Deep [Neo Blockchain](https://neo.org) integration allows FrostFS to be used by
-dApps directly from
-[NeoVM](https://docs.neo.org/docs/en-us/basic/technology/neovm.html) on the
-[Smart Contract](https://docs.neo.org/docs/en-us/intro/glossary.html)
-code level. This way dApps are not limited to on-chain storage and can
-manipulate large amounts of data without paying a prohibitive price.
-
-FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
-protocol gateways for popular protocols such as [AWS
-S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
-[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
-[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
-[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
-developers to integrate applications without rewriting their code.
-
-# Supported platforms
-
-Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
-platforms will be officially supported after release `1.0`.
-
-The latest version of frostfs-node works with frostfs-contract
-[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
-
-# Building
-
-To make all binaries you need Go 1.22+ and `make`:
-```
-make all
-```
-The resulting binaries will appear in the `bin/` folder.
-
-To make a specific binary, use:
-```
-make bin/frostfs-<name>
-```
-See the list of all available commands in the `cmd` folder.
-
-## Building with Docker
-
-Building can also be performed in a container:
-```
-make docker/all                  # build all binaries
-make docker/bin/frostfs-<name>   # build a specific binary
-```
-
-## Docker images
-
-To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/), use:
-```
-make images
-```
-
-# Debugging
-
-## VSCode
-
-To run and debug a single-node cluster with VSCode:
-
-1. Clone and build the [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract) repository at the same directory level as `frostfs-node`. For example:
-
-```
-/
-├── src
-    ├── frostfs-node
-    └── frostfs-contract
-```
-See `frostfs-contract`'s README.md for build instructions.
-
-2. Copy `launch.json` and `tasks.json` from the `dev/.vscode-example` directory to the `.vscode` directory. If you already have such files in the `.vscode` directory, merge them manually.
-
-3. Go to **Run and Debug** (`Ctrl+Shift+D`) and start the `IR+Storage node` configuration.
-
-4. 
To create a container and put an object into it, run (container and object IDs will be different):
-
-```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
-Enter password > <- press ENTER, there is no password for the wallet
-CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-
-./bin/frostfs-cli object put -r 127.0.0.1:8080 --wallet ./dev/wallet.json --file README.md --cid CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-Enter password >
- 4300 / 4300 [===========================================================================================================================================================================================================] 100.00% 0s
-[README.md] Object successfully stored
-  OID: 78sohnudVMnPsczXqsTUcvezosan2YDNVZwDE8Kq5YwU
-  CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-
-./bin/frostfs-cli object get -r 127.0.0.1:8080 --wallet ./dev/wallet.json --cid CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju --oid 78sohnudVMnPsczXqsTUcvezosan2YDNVZwDE8Kq5YwU
-...
-
-```
-
-# Contributing
-
-Feel free to contribute to this project after reading the [contributing
-guidelines](CONTRIBUTING.md).
-
-Before starting to work on a certain topic, create a new issue first, describing
-the feature/topic you are going to implement.
-
-# Credits
-
-FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
-contributions from community members.
-
-Please see [CREDITS](CREDITS.md) for details.
-
-# License
-
-- [GNU General Public License v3.0](LICENSE)
+Useful things may be published only in [other branches](../../../branches)
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 9052dab96..000000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-v0.44.0
diff --git a/cmd/frostfs-adm/README.md b/cmd/frostfs-adm/README.md
deleted file mode 100644
index 3dfcc8781..000000000
--- a/cmd/frostfs-adm/README.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# FrostFS Admin Tool
-
-## Overview
-
-The admin tool provides an easier way to deploy and maintain a private installation
-of FrostFS. A private installation provides a set of N3 consensus nodes, FrostFS
-Alphabet, and Storage nodes. The admin tool generates consensus keys, initializes
-the sidechain, and provides functions to update the network and register new
-Storage nodes.
-
-## Build
-
-To build the binary locally, use the `make bin/frostfs-adm` command.
-
-For a clean build inside a docker container, use `make docker/bin/frostfs-adm`.
-
-Build the docker image with `make image-adm`.
-
-For a FrostFS private install deployment, frostfs-adm requires compiled FrostFS
-contracts. Find them in the latest release of the
-[frostfs-contract repository](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases).
-
-## Commands
-
-### Config
-
-The config section provides the `init` command that creates a configuration file for
-private installation deployment and updates. The config file is optional: all
-parameters can be passed by arguments or read from standard input (wallet
-passwords).
-
-Config example:
-```yaml
-rpc-endpoint: https://address:port # sidechain RPC node endpoint
-alphabet-wallets: /path # path to consensus node / alphabet wallets storage
-network:
-  max_object_size: 67108864 # max size of a single FrostFS object, bytes
-  epoch_duration: 240 # duration of a FrostFS epoch in blocks, consider block generation frequency in the sidechain
-  fee:
-    candidate: 0 # inner ring candidate registration fee, for private installation consider 0
-    container: 0 # container creation fee, for private installation consider 0
-    container_alias: 0 # container nice-name registration fee, for private installation consider 0
-    withdraw: 0 # withdraw fee, for private installation consider 0
-credentials: # passwords for consensus node / alphabet wallets
-  az: password1
-  buky: password2
-  vedi: password3
-  glagoli: password4
-  dobro: password5
-  yest: password6
-  zhivete: password7
-```
-
-### Morph
-
-#### Network deployment
-
-- `generate-alphabet` generates a set of wallets for consensus and
-  Alphabet nodes. The list of names for the alphabet wallets (no gaps between names allowed, order is important):
-
-  az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
-
-- `init` initializes the sidechain by deploying smart contracts and
-  setting the provided FrostFS network configuration.
-
-- `generate-storage-wallet` generates a wallet for the Storage node that
-  is ready for deployment. It also transfers a bit of sidechain GAS, so this
-  wallet can be used for FrostFS bootstrap.
-
-#### Network maintenance
-
-- `set-config` adds/updates configuration values in the Netmap contract.
-
-- `force-new-epoch` increments the FrostFS epoch number and executes new epoch
-  handlers in FrostFS nodes.
-
-- `refill-gas` transfers sidechain GAS to the specified wallet.
-
-- `update-contracts` updates contracts to a new version.
-
-#### Container migration
-
-If a network has to be redeployed, these commands will migrate all container meta
-info. These commands **do not migrate actual objects**.
-
-- `dump-containers` saves all containers and metadata registered in the container
-  contract to a file.
-
-- `restore-containers` restores previously saved containers by their repeated registration in
-  the container contract.
-
-- `list-containers` outputs all container IDs.
-
-#### Network info
-
-- `dump-config` prints the FrostFS network configuration.
-
-- `dump-hashes` prints FrostFS contract addresses stored in NNS.
-
-
-## Private network deployment
-
-Read the step-by-step guide to private storage deployment [in docs](./docs/deploy.md).
diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md
deleted file mode 100644
index b4b1ed8e4..000000000
--- a/cmd/frostfs-adm/docs/deploy.md
+++ /dev/null
@@ -1,215 +0,0 @@
-# Step-by-step private FrostFS deployment
-
-This is a short guide on how to deploy a private FrostFS storage network on bare
-metal without docker images. This guide does not cover details on how to start
-consensus, Alphabet, or Storage nodes. This guide covers only `frostfs-adm`
-related configuration details.
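The configuration file format shown above is plain YAML read with viper; the same keys are exercised by `config_test.go` later in this diff. As a self-contained sketch of loading such a config programmatically (the file name matches the example used in Step 1 below; the printed keys are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigFile("foo.network.yml") // config created by `frostfs-adm config init`
	if err := v.ReadInConfig(); err != nil {
		panic(err)
	}
	fmt.Println("RPC endpoint:  ", v.GetString("rpc-endpoint"))
	fmt.Println("alphabet dir:  ", v.GetString("alphabet-wallets"))
	fmt.Println("epoch (blocks):", v.GetInt("network.epoch_duration"))
	// Wallet passwords live under credentials.<wallet name>.
	fmt.Println("az password set:", v.IsSet("credentials.az"))
}
```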
-
-## Prerequisites
-
-To follow this guide, you need:
-- the latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
-- the latest released version of the [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
-- the latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
-
-## Step 1: Prepare network configuration
-
-To start a network, you need a set of consensus nodes, the same number of
-Alphabet nodes, and any number of Storage nodes. While the number of Storage
-nodes can be scaled almost infinitely, the number of consensus and Alphabet
-nodes can't be changed so easily right now. Consider this before going any further.
-Note also that there is an upper limit on the number of alphabet nodes (currently 22).
-
-It is easier to use `frostfs-adm` with a predefined configuration. First, create
-a network configuration file. In this example, there is going to be only one
-consensus / Alphabet node in the network.
-
-```
-$ frostfs-adm config init --path foo.network.yml
-Initial config file saved to foo.network.yml
-
-$ cat foo.network.yml
-rpc-endpoint: https://neo.rpc.node:30333
-alphabet-wallets: /home/user/deploy/alphabet-wallets
-network:
-  max_object_size: 67108864
-  epoch_duration: 240
-  max_ec_data_count: 12
-  max_ec_parity_count: 4
-  fee:
-    candidate: 0
-    container: 0
-    withdraw: 0
-credentials:
-  az: hunter2
-```
-
-For a private installation, it is recommended to set all **fees** and the **basic
-income rate** to 0.
-
-As for the **epoch duration**, consider the consensus node block generation frequency.
-With the default 15 seconds per block, 240 blocks are going to be a 1-hour epoch.
-
-For **max object size**, 67108864 (64 MiB) or 134217728 (128 MiB) should provide
-good chunk distribution in most cases.
-
-With this config, generate wallets (private keys) of consensus nodes. The same
-wallets will be used for Alphabet nodes. Make sure that the directory for the
-alphabet wallets already exists.
-
-```
-$ frostfs-adm -c foo.network.yml morph generate-alphabet --size 1
-size: 1
-alphabet-wallets: /home/user/deploy/alphabet-wallets
-wallet[0]: hunter2
-```
-
-This command generates wallets with the following names:
-
-  az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
-
-No gaps between names allowed, order is important.
-
-Do not lose the wallet files and the network config. Store them in encrypted,
-backed-up storage.
-
-## Step 2: Launch consensus nodes
-
-Configure blockchain nodes with the generated wallets from the previous step.
-Config examples can be found in the
-[neo-go repository](https://github.com/nspcc-dev/neo-go/tree/master/config).
-
-Gather public keys from **all** generated wallets. We are interested in the first
-`simple signature contract` public key.
-
-```
-$ neo-go wallet dump-keys -w alphabet-wallets/az.json
-NitdS4k4f1Hh5mbLJhAswBK3WC2gQgPN1o (simple signature contract):
-02c1cc85f9c856dbe2d02017349bcb7b4e5defa78b8056a09b3240ba2a8c078869
-
-NiMKabp3ddi3xShmLAXhTfbnuWb4cSJT6E (1 out of 1 multisig contract):
-02c1cc85f9c856dbe2d02017349bcb7b4e5defa78b8056a09b3240ba2a8c078869
-
-NiMKabp3ddi3xShmLAXhTfbnuWb4cSJT6E (1 out of 1 multisig contract):
-02c1cc85f9c856dbe2d02017349bcb7b4e5defa78b8056a09b3240ba2a8c078869
-```
-
-Put the list of public keys into the `ProtocolConfiguration.StandbyCommittee`
-section.
Specify the wallet path and the password in the `ApplicationConfiguration.P2PNotary`
-and `ApplicationConfiguration.UnlockWallet` sections. If the config includes a
-`ProtocolConfiguration.NativeActivations` section, add the notary
-contract `Notary: [0]`.
-
-```yaml
-ProtocolConfiguration:
-  StandbyCommittee:
-    - 02c1cc85f9c856dbe2d02017349bcb7b4e5defa78b8056a09b3240ba2a8c078869
-  NativeActivations:
-    Notary: [0]
-ApplicationConfiguration:
-  P2PNotary:
-    Enabled: true
-    UnlockWallet:
-      Path: "/home/user/deploy/alphabet-wallets/az.json"
-      Password: "hunter2"
-  UnlockWallet:
-    Path: "/home/user/deploy/alphabet-wallets/az.json"
-    Password: "hunter2"
-```
-
-Then, launch the consensus nodes. They should connect to each other and start
-producing blocks in consensus. You might want to deploy additional RPC
-nodes at this stage because Storage nodes should be connected to the chain too.
-It is not recommended to use a consensus node as an RPC node due to security policies
-and possible overload issues.
-
-## Step 3: Initialize sidechain
-
-Use the archive with compiled FrostFS contracts to initialize the sidechain.
-
-```
-$ tar -xzvf frostfs-contract-v0.11.0.tar.gz
-
-$ ./frostfs-adm -c foo.network.yml morph init --contracts ./frostfs-contract-v0.11.0
-Stage 1: transfer GAS to alphabet nodes.
-Waiting for transactions to persist...
-Stage 2: set notary and alphabet nodes in designate contract.
-Waiting for transactions to persist...
-Stage 3: deploy NNS contract.
-Waiting for transactions to persist...
-Stage 4: deploy FrostFS contracts.
-Waiting for transactions to persist...
-Stage 4.1: Transfer GAS to proxy contract.
-Waiting for transactions to persist...
-Stage 5: register candidates.
-Waiting for transactions to persist...
-Stage 6: transfer NEO to alphabet contracts.
-Waiting for transactions to persist...
-Stage 7: set addresses in NNS.
-Waiting for transactions to persist...
-NNS: Set alphabet0.frostfs -> f692dfb4d43a15b464eb51a7041160fb29c44b6a
-NNS: Set balance.frostfs -> 103519b3067a66307080a66570c0491ee8f68879
-NNS: Set container.frostfs -> cae60bdd689d185901e495352d0247752ce50846
-NNS: Set frostfsid.frostfs -> c421fb60a3895865a8f24d197d6a80ef686041d2
-NNS: Set netmap.frostfs -> 894eb854632f50fb124412ce7951ebc00763525e
-NNS: Set proxy.frostfs -> ac6e6fe4b373d0ca0ca4969d1e58fa0988724e7d
-Waiting for transactions to persist...
-```
-
-## Step 4: Launch Alphabet nodes
-
-Configure Alphabet nodes with the wallets generated in step 1. For
-`morph.validators` use a list of public keys from
-`ProtocolConfiguration.StandbyCommittee`.
-
-```yaml
-wallet:
-  path: "/home/user/deploy/alphabet-wallets/az.json"
-  password: "hunter2"
-  account: "NitdS4k4f1Hh5mbLJhAswBK3WC2gQgPN1o"
-
-morph:
-  validators:
-    - 02c1cc85f9c856dbe2d02017349bcb7b4e5defa78b8056a09b3240ba2a8c078869
-
-contracts:
-  alphabet:
-    amount: 1
-```
-
-## Step 5: Launch Storage node
-
-Generate a new wallet for a Storage node.
-
-```
-$ frostfs-adm -c foo.network.yml morph generate-storage-wallet --storage-wallet ./sn01.json --initial-gas 10.0
-New password >
-Waiting for transactions to persist...
-
-$ neo-go wallet dump-keys -w sn01.json
-Ngr7p8Z9S22XDH6VkUG9oXobv8zZRAWwwv (simple signature contract):
-0355eccb72cd46f09a3e5237eaa0f4949cceb5ecfa5a225bd3bb9fd021c4d75b85
-```
-
-Configure the Storage node to use this wallet.
-
-```
-node:
-  wallet:
-    path: "/home/user/deploy/sn01.json"
-    address: "Ngr7p8Z9S22XDH6VkUG9oXobv8zZRAWwwv"
-    password: "foobar"
-```
-
-The storage node will be included in the network map in the next FrostFS epoch.
To
-speed up this process, you can increment the epoch counter immediately.
-
-```
-$ frostfs-adm -c foo.network.yml morph force-new-epoch
-Current epoch: 8, increase to 9.
-Waiting for transactions to persist...
-```
-
----
-
-After that, FrostFS Storage is ready to work. You can access it directly or
-with protocol gates.
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
deleted file mode 100644
index f194e97f5..000000000
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package commonflags
-
-const (
-	ConfigFlag = "config"
-	ConfigFlagShorthand = "c"
-	ConfigFlagUsage = "Config file"
-
-	ConfigDirFlag = "config-dir"
-	ConfigDirFlagUsage = "Config directory"
-
-	Verbose = "verbose"
-	VerboseShorthand = "v"
-	VerboseUsage = "Verbose output"
-
-	EndpointFlag = "rpc-endpoint"
-	EndpointFlagDesc = "N3 RPC node endpoint"
-	EndpointFlagShort = "r"
-
-	WalletPath = "wallet"
-	WalletPathShorthand = "w"
-	WalletPathUsage = "Path to the wallet"
-
-	AlphabetWalletsFlag = "alphabet-wallets"
-	AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
-
-	AdminWalletPath = "wallet-admin"
-	AdminWalletUsage = "Path to the admin wallet"
-
-	LocalDumpFlag = "local-dump"
-	ProtoConfigPath = "protocol"
-	ContractsInitFlag = "contracts"
-	ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
-	ContractsURLFlag = "contracts-url"
-	ContractsURLFlagDesc = "URL to archive with compiled FrostFS contracts"
-	EpochDurationInitFlag = "network.epoch_duration"
-	MaxObjectSizeInitFlag = "network.max_object_size"
-	MaxECDataCountFlag = "network.max_ec_data_count"
-	MaxECParityCounFlag = "network.max_ec_parity_count"
-	RefillGasAmountFlag = "gas"
-	StorageWalletFlag = "storage-wallet"
-	ContainerFeeInitFlag = "network.fee.container"
-	ContainerAliasFeeInitFlag = "network.fee.container_alias"
-	CandidateFeeInitFlag = "network.fee.candidate"
-	WithdrawFeeInitFlag = "network.fee.withdraw"
-	MaintenanceModeAllowedInitFlag = "network.maintenance_mode_allowed"
-	HomomorphicHashDisabledInitFlag = "network.homomorphic_hash_disabled"
-	CustomZoneFlag = "domain"
-	AlphabetSizeFlag = "size"
-	AllFlag = "all"
-)
diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go
deleted file mode 100644
index 69153f0d7..000000000
--- a/cmd/frostfs-adm/internal/modules/config/config.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package config
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"text/template"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
-	"github.com/nspcc-dev/neo-go/cli/input"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-type configTemplate struct {
-	Endpoint string
-	AlphabetDir string
-	MaxObjectSize int
-	EpochDuration int
-	CandidateFee int
-	ContainerFee int
-	ContainerAliasFee int
-	MaxECDataCount int
-	MaxECParityCount int
-	WithdrawFee int
-	Glagolitics []string
-	HomomorphicHashDisabled bool
-}
-
-const configTxtTemplate = `rpc-endpoint: {{ .Endpoint}}
-alphabet-wallets: {{ .AlphabetDir}}
-network:
-  max_object_size: {{ .MaxObjectSize}}
-  epoch_duration: {{ .EpochDuration}}
-  max_ec_data_count: {{ .MaxECDataCount}}
-  max_ec_parity_count: {{ .MaxECParityCount}}
-  homomorphic_hash_disabled: {{ .HomomorphicHashDisabled}}
-  fee:
-    candidate: {{ .CandidateFee}}
-    container: {{ .ContainerFee}}
-    container_alias: {{ .ContainerAliasFee }}
-    withdraw: {{ 
.WithdrawFee}}
-# if credentials section is omitted, then frostfs-adm will require manual password input
-credentials:
-  contract: password # wallet for contract group signature{{ range.Glagolitics}}
-  {{.}}: password{{end}}
-`
-
-func initConfig(cmd *cobra.Command, _ []string) error {
-	configPath, err := readConfigPathFromArgs(cmd)
-	if err != nil {
-		return err
-	}
-
-	pathDir := filepath.Dir(configPath)
-	err = os.MkdirAll(pathDir, 0o700)
-	if err != nil {
-		return fmt.Errorf("create dir %s: %w", pathDir, err)
-	}
-
-	f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0o600)
-	if err != nil {
-		return fmt.Errorf("open %s: %w", configPath, err)
-	}
-	defer f.Close()
-
-	configText, err := generateConfigExample(pathDir, 7)
-	if err != nil {
-		return err
-	}
-
-	_, err = f.WriteString(configText)
-	if err != nil {
-		return fmt.Errorf("writing to %s: %w", configPath, err)
-	}
-
-	cmd.Printf("Initial config file saved to %s\n", configPath)
-
-	return nil
-}
-
-func readConfigPathFromArgs(cmd *cobra.Command) (string, error) {
-	configPath, err := cmd.Flags().GetString(configPathFlag)
-	if err != nil {
-		return "", err
-	}
-
-	if configPath != "" {
-		return configPath, nil
-	}
-
-	return defaultConfigPath()
-}
-
-func defaultConfigPath() (string, error) {
-	home, err := os.UserHomeDir()
-	if err != nil {
-		return "", fmt.Errorf("getting home dir path: %w", err)
-	}
-
-	return filepath.Join(home, ".frostfs", "adm", "config.yml"), nil
-}
-
-// generateConfigExample builds the .yml representation of the config file. It is
-// easier to build it manually with a template instead of using viper, because we
-// want to order records in a specific order in the file and, probably, provide
-// some comments as well.
-func generateConfigExample(appDir string, credSize int) (string, error) {
-	tmpl := configTemplate{
-		Endpoint: "https://neo.rpc.node:30333",
-		MaxObjectSize: 67108864, // 64 MiB
-		MaxECDataCount: 12, // Tested with 16-node networks, assuming 12 data + 4 parity nodes.
-		MaxECParityCount: 4, // Maximum 4 parity chunks, typically <= 3 for most policies.
-		EpochDuration: 240, // 1 hour with 15s per block
-		HomomorphicHashDisabled: false, // object homomorphic hash is enabled
-		CandidateFee: 100_0000_0000, // 100.0 GAS (Fixed8)
-		ContainerFee: 1000, // 0.000000001 * 7 GAS per container (Fixed12)
-		ContainerAliasFee: 500, // ContainerFee / 2
-		WithdrawFee: 1_0000_0000, // 1.0 GAS (Fixed8)
-		Glagolitics: make([]string, 0, credSize),
-	}
-
-	appDir, err := filepath.Abs(appDir)
-	if err != nil {
-		return "", fmt.Errorf("making absolute path for %s: %w", appDir, err)
-	}
-	tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
-
-	var i innerring.GlagoliticLetter
-	for i = range innerring.GlagoliticLetter(credSize) {
-		tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
-	}
-
-	t, err := template.New("config.yml").Parse(configTxtTemplate)
-	if err != nil {
-		return "", fmt.Errorf("parsing config template: %w", err)
-	}
-
-	buf := bytes.NewBuffer(nil)
-
-	err = t.Execute(buf, tmpl)
-	if err != nil {
-		return "", fmt.Errorf("generating config from template: %w", err)
-	}
-
-	return buf.String(), nil
-}
-
-func GetPassword(v *viper.Viper, name string) (string, error) {
-	key := "credentials." + name
-	if v.IsSet(key) {
-		return v.GetString(key), nil
-	}
-
-	prompt := "Password for " + name + " wallet > "
-	return input.ReadPassword(prompt)
-}
-
-func GetStoragePassword(v *viper.Viper, name string) (string, error) {
-	key := "storage." 
+ name
-	if name != "" && v.IsSet(key) {
-		return v.GetString(key), nil
-	}
-	return input.ReadPassword("New password > ")
-}
diff --git a/cmd/frostfs-adm/internal/modules/config/config_test.go b/cmd/frostfs-adm/internal/modules/config/config_test.go
deleted file mode 100644
index beb1210e1..000000000
--- a/cmd/frostfs-adm/internal/modules/config/config_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package config
-
-import (
-	"bytes"
-	"path/filepath"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
-	"github.com/spf13/viper"
-	"github.com/stretchr/testify/require"
-)
-
-func TestGenerateConfigExample(t *testing.T) {
-	const (
-		n = 10
-		appDir = "/home/example/.frostfs"
-	)
-
-	configText, err := generateConfigExample(appDir, n)
-	require.NoError(t, err)
-
-	v := viper.New()
-	v.SetConfigType("yml")
-
-	require.NoError(t, v.ReadConfig(bytes.NewBufferString(configText)))
-
-	require.Equal(t, "https://neo.rpc.node:30333", v.GetString("rpc-endpoint"))
-	require.Equal(t, filepath.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
-	require.Equal(t, 67108864, v.GetInt("network.max_object_size"))
-	require.Equal(t, 12, v.GetInt("network.max_ec_data_count"))
-	require.Equal(t, 4, v.GetInt("network.max_ec_parity_count"))
-	require.Equal(t, 240, v.GetInt("network.epoch_duration"))
-	require.Equal(t, 10000000000, v.GetInt("network.fee.candidate"))
-	require.Equal(t, 1000, v.GetInt("network.fee.container"))
-	require.Equal(t, 100000000, v.GetInt("network.fee.withdraw"))
-
-	var i innerring.GlagoliticLetter
-	for i = 0; i < innerring.GlagoliticLetter(n); i++ {
-		key := "credentials." + i.String()
-		require.Equal(t, "password", v.GetString(key))
-	}
-	require.Equal(t, "password", v.GetString("credentials.contract"))
-
-	key := "credentials." + i.String()
-	require.Equal(t, "", v.GetString(key))
-}
diff --git a/cmd/frostfs-adm/internal/modules/config/root.go b/cmd/frostfs-adm/internal/modules/config/root.go
deleted file mode 100644
index b9c3f2e8c..000000000
--- a/cmd/frostfs-adm/internal/modules/config/root.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package config
-
-import (
-	"github.com/spf13/cobra"
-)
-
-const configPathFlag = "path"
-
-var (
-	// RootCmd is a root command of config section.
-	RootCmd = &cobra.Command{
-		Use: "config",
-		Short: "Section for frostfs-adm config related commands",
-	}
-
-	initCmd = &cobra.Command{
-		Use: "init",
-		Short: "Initialize basic frostfs-adm configuration file",
-		Example: `frostfs-adm config init
-frostfs-adm config init --path .config/frostfs-adm.yml`,
-		RunE: initConfig,
-	}
-)
-
-func init() {
-	RootCmd.AddCommand(initCmd)
-
-	initCmd.Flags().String(configPathFlag, "", "Path to config (default ~/.frostfs/adm/config.yml)")
-}
diff --git a/cmd/frostfs-adm/internal/modules/config/util.go b/cmd/frostfs-adm/internal/modules/config/util.go
deleted file mode 100644
index 74a56ea08..000000000
--- a/cmd/frostfs-adm/internal/modules/config/util.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package config
-
-import (
-	"os"
-	"path/filepath"
-	"strings"
-)
-
-// ResolveHomePath replaces leading `~`
-// with home directory.
-//
-// Does nothing if path does not
-// start with `~`.
-func ResolveHomePath(path string) string { - homeDir, _ := os.UserHomeDir() - - if path == "~" { - path = homeDir - } else if strings.HasPrefix(path, "~/") { - path = filepath.Join(homeDir, path[2:]) - } - - return path -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go deleted file mode 100644 index d67b70d2a..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/root.go +++ /dev/null @@ -1,15 +0,0 @@ -package maintenance - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" - "github.com/spf13/cobra" -) - -var RootCmd = &cobra.Command{ - Use: "maintenance", - Short: "Section for maintenance commands", -} - -func init() { - RootCmd.AddCommand(zombie.Cmd) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go deleted file mode 100644 index 1b66889aa..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go +++ /dev/null @@ -1,70 +0,0 @@ -package zombie - -import ( - "crypto/ecdsa" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { - keyDesc := viper.GetString(walletFlag) - if keyDesc == "" { - return &nodeconfig.Key(appCfg).PrivateKey - } - data, err := os.ReadFile(keyDesc) - commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) - - priv, err := keys.NewPrivateKeyFromBytes(data) - if err != nil { - w, err := wallet.NewWalletFromFile(keyDesc) - commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) - return fromWallet(cmd, w, viper.GetString(addressFlag)) - } - return &priv.PrivateKey -} - -func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { - var ( - addr util.Uint160 - err error - ) - - if addrStr == "" { - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) - } - - acc := w.GetAccount(addr) - if acc == nil { - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) - } - - pass, err := getPassword() - commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) - - commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) - - return &acc.PrivateKey().PrivateKey -} - -func getPassword() (string, error) { - // this check allows empty passwords - if viper.IsSet("password") { - return viper.GetString("password"), nil - } - - return input.ReadPassword("Enter password > ") -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go deleted file mode 100644 index f73f33db9..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go +++ /dev/null @@ -1,31 +0,0 @@ -package zombie - -import ( - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func list(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - var containerID *cid.ID - if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" { - containerID = &cid.ID{} - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - } - - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { - if containerID != nil && a.Container() != *containerID { - return nil - } - cmd.Println(a.EncodeToString()) - return nil - })) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go deleted file mode 100644 index cd3a64499..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go +++ /dev/null @@ -1,46 +0,0 @@ -package zombie - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/spf13/cobra" -) - -func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { - addresses := morphconfig.RPCEndpoint(appCfg) - if len(addresses) == 0 { - commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) - } - key := nodeconfig.Key(appCfg) - cli, err := client.New(cmd.Context(), - key, - client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), - client.WithEndpoints(addresses...), - client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), - ) - commonCmd.ExitOnErr(cmd, "create morph client: %w", err) - return cli -} - -func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { - hs, err := morph.NNSContractAddress(client.NNSContainerContractName) - commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) - cc, err := cntClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) - return cc -} - -func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { - hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) - commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) - cli, err := netmapClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) - return cli -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go 
b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go deleted file mode 100644 index 27f83aec7..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go +++ /dev/null @@ -1,154 +0,0 @@ -package zombie - -import ( - "context" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "sync" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -type quarantine struct { - // mtx protects current field. - mtx sync.Mutex - current int - trees []*fstree.FSTree -} - -func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { - var paths []string - for _, sh := range engineInfo.Shards { - var storagePaths []string - for _, st := range sh.BlobStorInfo.SubStorages { - storagePaths = append(storagePaths, st.Path) - } - if len(storagePaths) == 0 { - continue - } - paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) - } - q, err := newQuarantine(paths) - commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) - return q -} - -func commonPath(paths []string) string { - if len(paths) == 0 { - return "" - } - if len(paths) == 1 { - return paths[0] - } - minLen := math.MaxInt - for _, p := range paths { - if len(p) < minLen { - minLen = len(p) - } - } - - var sb strings.Builder - for i := range minLen { - for _, path := range paths[1:] { - if paths[0][i] != path[i] { - return sb.String() - } - } - sb.WriteByte(paths[0][i]) - } - return sb.String() -} - -func newQuarantine(paths []string) (*quarantine, error) { - var q quarantine - for i := range paths { - f := fstree.New( - fstree.WithDepth(1), - fstree.WithDirNameLen(1), - fstree.WithPath(paths[i]), - fstree.WithPerm(os.ModePerm), - ) - if err := f.Open(mode.ComponentReadWrite); err != nil { - return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) - } - if err := f.Init(); err != nil { - return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) - } - q.trees = append(q.trees, f) - } - return &q, nil -} - -func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - for i := range q.trees { - res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) - if err != nil { - continue - } - return res.Object, nil - } - return nil, &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { - for i := range q.trees { - _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) - if err != nil { - continue - } - return nil - } - return &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { - data, err := obj.Marshal() - if err != nil { - return err - } - - var prm common.PutPrm - prm.Address = objectcore.AddressOf(obj) - prm.Object = obj - prm.RawData = data - - q.mtx.Lock() - current := q.current - q.current = (q.current + 1) % len(q.trees) - q.mtx.Unlock() - - _, err = 
q.trees[current].Put(ctx, prm) - return err -} - -func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { - var prm common.IteratePrm - prm.Handler = func(elem common.IterationElement) error { - return f(elem.Address) - } - for i := range q.trees { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - _, err := q.trees[i].Iterate(ctx, prm) - if err != nil { - return err - } - } - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go deleted file mode 100644 index 0b8f2f172..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -package zombie - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func remove(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - - var containerID cid.ID - cidStr, _ := cmd.Flags().GetString(cidFlag) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - - var objectID *oid.ID - oidStr, _ := cmd.Flags().GetString(oidFlag) - if oidStr != "" { - objectID = &oid.ID{} - commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) - } - - if objectID != nil { - var addr oid.Address - addr.SetContainer(containerID) - addr.SetObject(*objectID) - removeObject(cmd, q, addr) - } else { - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { - if addr.Container() != containerID { - return nil - } - removeObject(cmd, q, addr) - return nil - })) - } -} - -func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { - err := q.Delete(cmd.Context(), addr) - if errors.Is(err, new(apistatus.ObjectNotFound)) { - return - } - commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go deleted file mode 100644 index f179c7c2d..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go +++ /dev/null @@ -1,69 +0,0 @@ -package zombie - -import ( - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" 
-)
-
-func restore(cmd *cobra.Command, _ []string) {
-	configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
-	configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
-	appCfg := config.New(configFile, configDir, config.EnvPrefix)
-	storageEngine := newEngine(cmd, appCfg)
-	q := createQuarantine(cmd, storageEngine.DumpInfo())
-	morphClient := createMorphClient(cmd, appCfg)
-	cnrCli := createContainerClient(cmd, morphClient)
-
-	var containerID cid.ID
-	cidStr, _ := cmd.Flags().GetString(cidFlag)
-	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
-	var objectID *oid.ID
-	oidStr, _ := cmd.Flags().GetString(oidFlag)
-	if oidStr != "" {
-		objectID = &oid.ID{}
-		commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
-	}
-
-	if objectID != nil {
-		var addr oid.Address
-		addr.SetContainer(containerID)
-		addr.SetObject(*objectID)
-		restoreObject(cmd, storageEngine, q, addr, cnrCli)
-	} else {
-		commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
-			if addr.Container() != containerID {
-				return nil
-			}
-			restoreObject(cmd, storageEngine, q, addr, cnrCli)
-			return nil
-		}))
-	}
-}
-
-func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
-	obj, err := q.Get(cmd.Context(), addr)
-	commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
-	rawCID := make([]byte, sha256.Size)
-
-	cid := addr.Container()
-	cid.Encode(rawCID)
-	cnr, err := cnrCli.Get(cmd.Context(), rawCID)
-	commonCmd.ExitOnErr(cmd, "get container: %w", err)
-
-	putPrm := engine.PutPrm{
-		Object:             obj,
-		IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
-	}
-	commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
-	commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
deleted file mode 100644
index c8fd9e5e5..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package zombie
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-const (
-	flagBatchSize       = "batch-size"
-	flagBatchSizeUsage  = "Objects iteration batch size"
-	cidFlag             = "cid"
-	cidFlagUsage        = "Container ID"
-	oidFlag             = "oid"
-	oidFlagUsage        = "Object ID"
-	walletFlag          = "wallet"
-	walletFlagShorthand = "w"
-	walletFlagUsage     = "Path to the wallet or binary key"
-	addressFlag         = "address"
-	addressFlagUsage    = "Address of wallet account"
-	moveFlag            = "move"
-	moveFlagUsage       = "Move objects from storage engine to quarantine"
-)
-
-var (
-	Cmd = &cobra.Command{
-		Use:   "zombie",
-		Short: "Zombie objects related commands",
-	}
-	scanCmd = &cobra.Command{
-		Use:   "scan",
-		Short: "Scan storage engine for zombie objects and move them to quarantine",
-		Long:  "",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
-			_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
-			_ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
-			_ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
-			_ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
-			_ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
-		},
-		Run: scan,
-	}
-	listCmd = &cobra.Command{
-		Use:   "list",
-		Short: "List zombie objects from quarantine",
-		Long:  "",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
-			_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
-			_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
-		},
-		Run: list,
-	}
-	restoreCmd = &cobra.Command{
-		Use:   "restore",
-		Short: "Restore zombie objects from quarantine",
-		Long:  "",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
-			_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
-			_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
-			_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
-		},
-		Run: restore,
-	}
-	removeCmd = &cobra.Command{
-		Use:   "remove",
-		Short: "Remove zombie objects from quarantine",
-		Long:  "",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
-			_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
-			_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
-			_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
-		},
-		Run: remove,
-	}
-)
-
-func init() {
-	initScanCmd()
-	initListCmd()
-	initRestoreCmd()
-	initRemoveCmd()
-}
-
-func initScanCmd() {
-	Cmd.AddCommand(scanCmd)
-
-	scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
-	scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
-	scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
-	scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
-	scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
-	scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
-}
-
-func initListCmd() {
-	Cmd.AddCommand(listCmd)
-
-	listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
-	listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
-	listCmd.Flags().String(cidFlag, "", cidFlagUsage)
-}
-
-func initRestoreCmd() {
-	Cmd.AddCommand(restoreCmd)
-
-	restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
-	restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
-	restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
-	restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
-
-func initRemoveCmd() {
-	Cmd.AddCommand(removeCmd)
-
-	removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
-	removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
-	removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
-	removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
deleted file mode 100644
index 268ec4911..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package zombie
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"crypto/sha256"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-	netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
-	clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/spf13/cobra"
-	"golang.org/x/sync/errgroup"
-)
-
-func scan(cmd *cobra.Command, _ []string) {
-	configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
-	configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
-	appCfg := config.New(configFile, configDir, config.EnvPrefix)
-	batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
-	if batchSize == 0 {
-		commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
-	}
-	move, _ := cmd.Flags().GetBool(moveFlag)
-
-	storageEngine := newEngine(cmd, appCfg)
-	morphClient := createMorphClient(cmd, appCfg)
-	cnrCli := createContainerClient(cmd, morphClient)
-	nmCli := createNetmapClient(cmd, morphClient)
-	q := createQuarantine(cmd, storageEngine.DumpInfo())
-	pk := getPrivateKey(cmd, appCfg)
-
-	epoch, err := nmCli.Epoch(cmd.Context())
-	commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
-
-	nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
-	commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
-
-	cmd.Printf("Epoch: %d\n", nm.Epoch())
-	cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
-
-	ps := &processStatus{
-		statusCount: make(map[status]uint64),
-	}
-
-	stopCh := make(chan struct{})
-	start := time.Now()
-	var wg sync.WaitGroup
-	wg.Add(2)
-	go func() {
-		defer wg.Done()
-		tick := time.NewTicker(time.Second)
-		defer tick.Stop()
-		for {
-			select {
-			case <-cmd.Context().Done():
-				return
-			case <-stopCh:
-				return
-			case <-tick.C:
-				fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
-			}
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
-		close(stopCh)
-	}()
-	wg.Wait()
-	commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
-
-	cmd.Println()
-	cmd.Println("Status description:")
-	cmd.Println("undefined -- nothing is clear")
-	cmd.Println("found -- object is found in cluster")
-	cmd.Println("quarantine -- object is not found in cluster")
-	cmd.Println()
-	for status, count := range ps.statusCount {
-		cmd.Printf("Status: %s, Count: %d\n", status, count)
-	}
-}
-
-type status string
-
-const (
-	statusUndefined  status = "undefined"
-	statusFound      status = "found"
-	statusQuarantine status = "quarantine"
-)
-
-func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
-	rawCID := make([]byte, sha256.Size)
-	cid := obj.Address.Container()
-	cid.Encode(rawCID)
-
-	cnr, err := cnrCli.Get(ctx, rawCID)
-	if err != nil {
-		var errContainerNotFound *apistatus.ContainerNotFound
-		if errors.As(err, &errContainerNotFound) {
-			// Policer will deal with this object.
-			return statusFound, nil
-		}
-		return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
-	}
-	nm, err := nmCli.NetMap(ctx)
-	if err != nil {
-		return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
-	}
-
-	nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
-	if err != nil {
-		// Not enough nodes, check all netmap nodes.
-		nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
-	}
-
-	objID := obj.Address.Object()
-	cnrID := obj.Address.Container()
-	local := true
-	raw := false
-	if obj.ECInfo != nil {
-		objID = obj.ECInfo.ParentID
-		local = false
-		raw = true
-	}
-	prm := clientSDK.PrmObjectHead{
-		ObjectID:    &objID,
-		ContainerID: &cnrID,
-		Local:       local,
-		Raw:         raw,
-	}
-
-	var ni clientCore.NodeInfo
-	for i := range nodes {
-		for j := range nodes[i] {
-			if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
-				return statusUndefined, fmt.Errorf("parse node info: %w", err)
-			}
-			c, err := cc.Get(ni)
-			if err != nil {
-				continue
-			}
-			res, err := c.ObjectHead(ctx, prm)
-			if err != nil {
-				var errECInfo *objectSDK.ECInfoError
-				if raw && errors.As(err, &errECInfo) {
-					return statusFound, nil
-				}
-				continue
-			}
-			if err := apistatus.ErrFromStatus(res.Status()); err != nil {
-				continue
-			}
-			return statusFound, nil
-		}
-	}
-
-	if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
-		return statusFound, nil
-	}
-	return statusQuarantine, nil
-}
-
-func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
-	appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
-) error {
-	cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
-		DialTimeout:      apiclientconfig.DialTimeout(appCfg),
-		StreamTimeout:    apiclientconfig.StreamTimeout(appCfg),
-		ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
-		Key:              pk,
-		AllowExternal:    apiclientconfig.AllowExternal(appCfg),
-	})
-	ctx := cmd.Context()
-
-	var cursor *engine.Cursor
-	for {
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-		}
-
-		var prm engine.ListWithCursorPrm
-		prm.WithCursor(cursor)
-		prm.WithCount(batchSize)
-
-		res, err := storageEngine.ListWithCursor(ctx, prm)
-		if err != nil {
-			if errors.Is(err, engine.ErrEndOfListing) {
-				return nil
-			}
-			return fmt.Errorf("list with cursor: %w", err)
-		}
-
-		cursor = res.Cursor()
-		addrList := res.AddressList()
-		eg, egCtx := errgroup.WithContext(ctx)
-		eg.SetLimit(int(batchSize))
-
-		for i := range addrList {
-			addr := addrList[i]
-			eg.Go(func() error {
-				result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
-				if err != nil {
-					return fmt.Errorf("check object %s status: %w", addr.Address, err)
-				}
-				ps.add(result)
-
-				if !move && result == statusQuarantine {
-					cmd.Println(addr)
-					return nil
-				}
-
-				if result == statusQuarantine {
-					return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
-				}
-				return nil
-			})
-		}
-		if err := eg.Wait(); err != nil {
-			return fmt.Errorf("process objects batch: %w", err)
-		}
-	}
-}
-
-func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
-	var getPrm engine.GetPrm
-	getPrm.WithAddress(addr)
-	res, err := storageEngine.Get(ctx, getPrm)
-	if err != nil {
-		return fmt.Errorf("get object %s from storage engine: %w", addr, err)
-	}
-
-	if err := q.Put(ctx, res.Object()); err != nil {
-		return fmt.Errorf("put object %s to quarantine: %w", addr, err)
-	}
-
-	var delPrm engine.DeletePrm
-	delPrm.WithForceRemoval()
-	delPrm.WithAddress(addr)
-
-	if err = storageEngine.Delete(ctx, delPrm); err != nil {
-		return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
-	}
-	return nil
-}
-
-type processStatus struct {
-	guard       sync.RWMutex
-	statusCount map[status]uint64
-	count       uint64
-}
-
-func (s *processStatus) add(st status) {
-	s.guard.Lock()
-	defer s.guard.Unlock()
-	s.statusCount[st]++
-	s.count++
-}
-
-func (s *processStatus) total() uint64 {
-	s.guard.RLock()
-	defer s.guard.RUnlock()
-	return s.count
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
deleted file mode 100644
index 5be34d502..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package zombie
-
-import (
-	"context"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
-	shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
-	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
-	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	"github.com/panjf2000/ants/v2"
-	"github.com/spf13/cobra"
-	"go.etcd.io/bbolt"
-	"go.uber.org/zap"
-)
-
-func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
-	ngOpts := storageEngineOptions(c)
-	shardOpts := shardOptions(cmd, c)
-	e := engine.New(ngOpts...)
-	for _, opts := range shardOpts {
-		_, err := e.AddShard(cmd.Context(), opts...)
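-		// Each shard described in the config is registered here; the engine is
-		// only opened and initialized below once every AddShard call succeeds.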
-		commonCmd.ExitOnErr(cmd, "add shard: %w", err)
-	}
-	commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
-	commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
-	return e
-}
-
-func storageEngineOptions(c *config.Config) []engine.Option {
-	return []engine.Option{
-		engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
-		engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-		engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
-	}
-}
-
-func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
-	var result [][]shard.Option
-	err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
-		result = append(result, getShardOpts(cmd, c, sh))
-		return nil
-	})
-	commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
-	return result
-}
-
-func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
-	wc, wcEnabled := getWriteCacheOpts(sh)
-	return []shard.Option{
-		shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-		shard.WithRefillMetabase(sh.RefillMetabase()),
-		shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
-		shard.WithMode(sh.Mode()),
-		shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
-		shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
-		shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
-		shard.WithWriteCache(wcEnabled),
-		shard.WithWriteCacheOptions(wc),
-		shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
-		shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
-		shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
-		shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
-		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
-			return pool
-		}),
-		shard.WithLimiter(qos.NewNoopLimiter()),
-	}
-}
-
-func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
-	if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
-		var result []writecache.Option
-		result = append(result,
-			writecache.WithPath(wc.Path()),
-			writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
-			writecache.WithMaxObjectSize(wc.MaxObjectSize()),
-			writecache.WithFlushWorkersCount(wc.WorkerCount()),
-			writecache.WithMaxCacheSize(wc.SizeLimit()),
-			writecache.WithMaxCacheCount(wc.CountLimit()),
-			writecache.WithNoSync(wc.NoSync()),
-			writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-			writecache.WithQoSLimiter(qos.NewNoopLimiter()),
-		)
-		return result, true
-	}
-	return nil, false
-}
-
-func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
-	var piloramaOpts []pilorama.Option
-	if config.BoolSafe(c.Sub("tree"), "enabled") {
-		pr := sh.Pilorama()
-		piloramaOpts = append(piloramaOpts,
-			pilorama.WithPath(pr.Path()),
-			pilorama.WithPerm(pr.Perm()),
-			pilorama.WithNoSync(pr.NoSync()),
-			pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
-			pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
-		)
-	}
-	return piloramaOpts
-}
-
-func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
-	return []meta.Option{
-		meta.WithPath(sh.Metabase().Path()),
-		meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
-		meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
-		meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
-		meta.WithBoltDBOptions(&bbolt.Options{
-			Timeout: 100 * time.Millisecond,
-		}),
-		meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-		meta.WithEpochState(&epochState{}),
-	}
-}
-
-func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
-	result := []blobstor.Option{
-		blobstor.WithCompression(sh.Compression()),
-		blobstor.WithStorages(getSubStorages(ctx, sh)),
-		blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-	}
-
-	return result
-}
-
-func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
-	var ss []blobstor.SubStorage
-	for _, storage := range sh.BlobStor().Storages() {
-		switch storage.Type() {
-		case blobovniczatree.Type:
-			sub := blobovniczaconfig.From((*config.Config)(storage))
-			blobTreeOpts := []blobovniczatree.Option{
-				blobovniczatree.WithRootPath(storage.Path()),
-				blobovniczatree.WithPermissions(storage.Perm()),
-				blobovniczatree.WithBlobovniczaSize(sub.Size()),
-				blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
-				blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
-				blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
-				blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
-				blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
-				blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
-				blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
-				blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
-				blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
-				blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
-			}
-
-			ss = append(ss, blobstor.SubStorage{
-				Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
-				Policy: func(_ *objectSDK.Object, data []byte) bool {
-					return uint64(len(data)) < sh.SmallSizeLimit()
-				},
-			})
-		case fstree.Type:
-			sub := fstreeconfig.From((*config.Config)(storage))
-			fstreeOpts := []fstree.Option{
-				fstree.WithPath(storage.Path()),
-				fstree.WithPerm(storage.Perm()),
-				fstree.WithDepth(sub.Depth()),
-				fstree.WithNoSync(sub.NoSync()),
-				fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
-			}
-
-			ss = append(ss, blobstor.SubStorage{
-				Storage: fstree.New(fstreeOpts...),
-				Policy: func(_ *objectSDK.Object, _ []byte) bool {
-					return true
-				},
-			})
-		default:
-			// should never happen, that has already
-			// been handled: when the config was read
-		}
-	}
-	return ss
-}
-
-type epochState struct{}
-
-func (epochState) CurrentEpoch() uint64 {
-	return 0
-}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/root.go b/cmd/frostfs-adm/internal/modules/metabase/root.go
deleted file mode 100644
index 5b21ed273..000000000
--- a/cmd/frostfs-adm/internal/modules/metabase/root.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package metabase
-
-import "github.com/spf13/cobra"
-
-// RootCmd is a root command of the metabase section.
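-//
-// A typical invocation (illustrative only; it assumes commonflags.ConfigFlag
-// resolves to --config, and uses the --no-compact flag registered in upgrade.go):
-//
-//	frostfs-adm metabase upgrade --config /etc/frostfs/storage/config.yml --no-compact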
-var RootCmd = &cobra.Command{
-	Use:   "metabase",
-	Short: "Section for metabase commands",
-}
-
-func init() {
-	RootCmd.AddCommand(UpgradeCmd)
-
-	initUpgradeCommand()
-}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
deleted file mode 100644
index c0c290c5e..000000000
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package metabase
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
-	shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
-	morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
-	nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
-	"github.com/spf13/cobra"
-	"golang.org/x/sync/errgroup"
-)
-
-const (
-	noCompactFlag = "no-compact"
-)
-
-var (
-	errNoPathsFound          = errors.New("no metabase paths found")
-	errNoMorphEndpointsFound = errors.New("no morph endpoints found")
-	errUpgradeFailed         = errors.New("upgrade failed")
-)
-
-var UpgradeCmd = &cobra.Command{
-	Use:   "upgrade",
-	Short: "Upgrade metabase to latest version",
-	RunE:  upgrade,
-}
-
-func upgrade(cmd *cobra.Command, _ []string) error {
-	configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
-	if err != nil {
-		return err
-	}
-	configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
-	if err != nil {
-		return err
-	}
-	appCfg := config.New(configFile, configDir, config.EnvPrefix)
-	paths, err := getMetabasePaths(appCfg)
-	if err != nil {
-		return err
-	}
-	if len(paths) == 0 {
-		return errNoPathsFound
-	}
-	cmd.Println("found", len(paths), "metabases:")
-	for i, path := range paths {
-		cmd.Println(i+1, ":", path)
-	}
-	mc, err := createMorphClient(cmd.Context(), appCfg)
-	if err != nil {
-		return err
-	}
-	defer mc.Close()
-	civ, err := createContainerInfoProvider(mc)
-	if err != nil {
-		return err
-	}
-	noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
-	result := make(map[string]bool)
-	var resultGuard sync.Mutex
-	eg, ctx := errgroup.WithContext(cmd.Context())
-	for _, path := range paths {
-		eg.Go(func() error {
-			var success bool
-			cmd.Println("upgrading metabase", path, "...")
-			if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
-				cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
-			}); err != nil {
-				cmd.Println("error: failed to upgrade metabase", path, ":", err)
-			} else {
-				success = true
-				cmd.Println("metabase", path, "upgraded successfully")
-			}
-			resultGuard.Lock()
-			result[path] = success
-			resultGuard.Unlock()
-			return nil
-		})
-	}
-	if err := eg.Wait(); err != nil {
-		return err
-	}
-	allSuccess := true
-	for mb, ok := range result {
-		if ok {
-			cmd.Println(mb, ": success")
-		} else {
-			cmd.Println(mb, ": failed")
-			allSuccess = false
-		}
-	}
-	if allSuccess {
-		return nil
-	}
-	return errUpgradeFailed
-}
-
-func getMetabasePaths(appCfg *config.Config) ([]string, error) {
-	var paths []string
-	if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
-		paths = append(paths, sc.Metabase().Path())
-		return nil
-	}); err != nil {
-		return nil, fmt.Errorf("get metabase paths: %w", err)
-	}
-	return paths, nil
-}
-
-func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
-	addresses := morphconfig.RPCEndpoint(appCfg)
-	if len(addresses) == 0 {
-		return nil, errNoMorphEndpointsFound
-	}
-	key := nodeconfig.Key(appCfg)
-	cli, err := client.New(ctx,
-		key,
-		client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
-		client.WithEndpoints(addresses...),
-		client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("create morph client: %w", err)
-	}
-	return cli, nil
-}
-
-func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
-	sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
-	if err != nil {
-		return nil, fmt.Errorf("resolve container contract hash: %w", err)
-	}
-	cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
-	if err != nil {
-		return nil, fmt.Errorf("create morph container client: %w", err)
-	}
-	return container.NewInfoProvider(func() (container.Source, error) {
-		return morphcontainer.AsContainerSource(cc), nil
-	}), nil
-}
-
-func initUpgradeCommand() {
-	flags := UpgradeCmd.Flags()
-	flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
deleted file mode 100644
index 1960faab4..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package ape
-
-import (
-	"bytes"
-	"encoding/json"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
-	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-const (
-	jsonFlag      = "json"
-	jsonFlagDesc  = "Output rule chains in JSON format"
-	addrAdminFlag = "addr"
-	addrAdminDesc = "The address of the admin's wallet"
-)
-
-var (
-	addRuleChainCmd = &cobra.Command{
-		Use:   "add-rule-chain",
-		Short: "Add rule chain",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-		},
-		Run: addRuleChain,
-	}
-
-	removeRuleChainCmd = &cobra.Command{
-		Use:   "rm-rule-chain",
-		Short: "Remove rule chain",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-		},
-		Run: removeRuleChain,
-	}
-
-	listRuleChainsCmd = &cobra.Command{
-		Use:   "list-rule-chains",
-		Short: "List rule chains",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: listRuleChains,
-	}
-
-	setAdminCmd = &cobra.Command{
-		Use:   "set-admin",
-		Short: "Set admin",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-		},
-		Run: setAdmin,
-	}
-
-	getAdminCmd = &cobra.Command{
-		Use:   "get-admin",
-		Short: "Get admin",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: getAdmin,
-	}
-
-	listTargetsCmd = &cobra.Command{
-		Use:   "list-targets",
-		Short: "List targets",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: listTargets,
-	}
-)
-
-func initAddRuleChainCmd() {
-	Cmd.AddCommand(addRuleChainCmd)
-
-	addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
-	addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
-	_ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-	addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
-	_ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
-
-	addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
-	_ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
-	addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
-	addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
-	addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
-	addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
-}
-
-func initRemoveRuleChainCmd() {
-	Cmd.AddCommand(removeRuleChainCmd)
-
-	removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
-	removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
-	_ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-	removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
-	_ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
-	removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
-	removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
-	removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
-	removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
-}
-
-func initListRuleChainsCmd() {
-	Cmd.AddCommand(listRuleChainsCmd)
-
-	listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
-	_ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-	listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
-	listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
-	listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
-}
-
-func initSetAdminCmd() {
-	Cmd.AddCommand(setAdminCmd)
-
-	setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-	setAdminCmd.Flags().String(addrAdminFlag, "", addrAdminDesc)
-	_ = setAdminCmd.MarkFlagRequired(addrAdminFlag)
-}
-
-func initGetAdminCmd() {
-	Cmd.AddCommand(getAdminCmd)
-
-	getAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func initListTargetsCmd() {
-	Cmd.AddCommand(listTargetsCmd)
-
-	listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
-	_ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-}
-
-func addRuleChain(cmd *cobra.Command, _ []string) {
-	chain := apeCmd.ParseChain(cmd)
-	target := parseTarget(cmd)
-	pci, ac := newPolicyContractInterface(cmd)
-	h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
-	cmd.Println("Waiting for transaction to persist...")
-	_, err = ac.Wait(h, vub, err)
-	commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
-	cmd.Println("Rule chain added successfully")
-}
-
-func removeRuleChain(cmd *cobra.Command, _ []string) {
-	target := parseTarget(cmd)
-	pci, ac := newPolicyContractInterface(cmd)
-	removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
-	if removeAll {
-		h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
-		cmd.Println("Waiting for transaction to persist...")
-		_, err = ac.Wait(h, vub, err)
-		commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
-		cmd.Println("All chains for target removed successfully")
-	} else {
-		chainID := apeCmd.ParseChainID(cmd)
-		h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
-		cmd.Println("Waiting for transaction to persist...")
-		_, err = ac.Wait(h, vub, err)
-		commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
-		cmd.Println("Rule chain removed successfully")
-	}
-}
-
-func listRuleChains(cmd *cobra.Command, _ []string) {
-	target := parseTarget(cmd)
-	pci, _ := newPolicyContractReaderInterface(cmd)
-	chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
-	commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
-	if len(chains) == 0 {
-		return
-	}
-
-	toJSON, _ := cmd.Flags().GetBool(jsonFlag)
-	if toJSON {
-		prettyJSONFormat(cmd, chains)
-	} else {
-		for _, c := range chains {
-			apeCmd.PrintHumanReadableAPEChain(cmd, c)
-		}
-	}
-}
-
-func setAdmin(cmd *cobra.Command, _ []string) {
-	s, _ := cmd.Flags().GetString(addrAdminFlag)
-	addr, err := address.StringToUint160(s)
-	commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
-	pci, ac := newPolicyContractInterface(cmd)
-	h, vub, err := pci.SetAdmin(addr)
-	cmd.Println("Waiting for transaction to persist...")
-	_, err = ac.Wait(h, vub, err)
-	commonCmd.ExitOnErr(cmd, "can't set admin: %w", err)
-	cmd.Println("Admin set successfully")
-}
-
-func getAdmin(cmd *cobra.Command, _ []string) {
-	pci, _ := newPolicyContractReaderInterface(cmd)
-	addr, err := pci.GetAdmin()
-	commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
-	cmd.Println(address.Uint160ToString(addr))
-}
-
-func listTargets(cmd *cobra.Command, _ []string) {
-	typ := apeCmd.ParseTargetType(cmd)
-	pci, inv := newPolicyContractReaderInterface(cmd)
-
-	sid, it, err := pci.ListTargetsIterator(typ)
-	commonCmd.ExitOnErr(cmd, "list targets error: %w", err)
-	items, err := inv.TraverseIterator(sid, &it, 0)
-	for err == nil && len(items) != 0 {
-		for _, item := range items {
-			bts, err := item.TryBytes()
-			commonCmd.ExitOnErr(cmd, "list targets error: %w", err)
-			if len(bts) == 0 {
-				cmd.Println("(no name)")
-			} else {
-				cmd.Println(string(bts))
-			}
-		}
-		items, err = inv.TraverseIterator(sid, &it, 0)
-		commonCmd.ExitOnErr(cmd, "unable to list targets: %w", err)
-	}
-}
-
-func prettyJSONFormat(cmd *cobra.Command, chains []*apechain.Chain) {
-	wr := bytes.NewBufferString("")
-	data, err := json.Marshal(chains)
-	if err == nil {
-		err = json.Indent(wr, data, "", " ")
-	}
-	commonCmd.ExitOnErr(cmd, "print rule chain error: %w", err)
-	cmd.Println(wr)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
deleted file mode 100644
index 3c332c3f0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package ape
-
-import (
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-	morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var errUnknownTargetType = errors.New("unknown target type")
-
-func parseTarget(cmd *cobra.Command) policyengine.Target {
-	typ := apeCmd.ParseTargetType(cmd)
-	name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
-	switch typ {
-	case policyengine.Namespace:
-		if name == "root" {
-			name = ""
-		}
-		return policyengine.NamespaceTarget(name)
-	case policyengine.Container:
-		var cnr cid.ID
-		commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
-		return policyengine.ContainerTarget(name)
-	case policyengine.User:
-		return policyengine.UserTarget(name)
-	case policyengine.Group:
-		return policyengine.GroupTarget(name)
-	default:
-		commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
-	}
-	panic("unreachable")
-}
-
-// invokerAdapter adapts invoker.Invoker to ContractStorageInvoker interface.
-type invokerAdapter struct {
-	*invoker.Invoker
-	rpcActor invoker.RPCInvoke
-}
-
-func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
-	return n.rpcActor
-}
-
-func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
-	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
-	inv := invoker.New(c, nil)
-	r := management.NewReader(inv)
-	nnsCs, err := helper.GetContractByID(r, 1)
-	commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
-	ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
-	commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
-
-	invokerAdapter := &invokerAdapter{
-		Invoker:  inv,
-		rpcActor: c,
-	}
-
-	return morph.NewContractStorageReader(invokerAdapter, ch), inv
-}
-
-func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
-	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
-	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
-	ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
-	commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
-
-	var ch util.Uint160
-	r := management.NewReader(ac.Invoker)
-	nnsCs, err := helper.GetContractByID(r, 1)
-	commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
-	ch, err = helper.NNSResolveHash(ac.Invoker, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
-	commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
-
-	return morph.NewContractStorage(ac, ch), ac
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/root.go b/cmd/frostfs-adm/internal/modules/morph/ape/root.go
deleted file mode 100644
index a4746cd2c..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/root.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package ape
-
-import "github.com/spf13/cobra"
-
-var Cmd = &cobra.Command{
-	Use:   "ape",
-	Short: "Section for APE configuration commands",
-}
-
-func init() {
-	initAddRuleChainCmd()
-	initRemoveRuleChainCmd()
-	initListRuleChainsCmd()
-	initSetAdminCmd()
-	initGetAdminCmd()
-	initListTargetsCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
deleted file mode 100644
index 23dba14f4..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package balance
-
-import (
-	"crypto/elliptic"
-	"errors"
-	"fmt"
-	"math/big"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/io"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-type accBalancePair struct {
-	scriptHash util.Uint160
-	balance    *big.Int
-}
-
-const (
-	dumpBalancesStorageFlag       = "storage"
-	dumpBalancesAlphabetFlag      = "alphabet"
-	dumpBalancesProxyFlag         = "proxy"
-	dumpBalancesUseScriptHashFlag = "script-hash"
-)
-
-func dumpBalances(cmd *cobra.Command, _ []string) error {
-	var (
-		dumpStorage, _  = cmd.Flags().GetBool(dumpBalancesStorageFlag)
-		dumpAlphabet, _ = cmd.Flags().GetBool(dumpBalancesAlphabetFlag)
-		dumpProxy, _    = cmd.Flags().GetBool(dumpBalancesProxyFlag)
-		nnsCs           *state.Contract
-		nmHash          util.Uint160
-	)
-
-	c, err := helper.NewRemoteClient(viper.GetViper())
-	if err != nil {
-		return err
-	}
-
-	inv := invoker.New(c, nil)
-
-	if dumpStorage || dumpAlphabet || dumpProxy {
-		r := management.NewReader(inv)
-		nnsCs, err = helper.GetContractByID(r, 1)
-		if err != nil {
-			return fmt.Errorf("can't get NNS contract info: %w", err)
-		}
-
-		nmHash, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.NetmapContract))
-		if err != nil {
-			return fmt.Errorf("can't get netmap contract hash: %w", err)
-		}
-	}
-
-	irList, err := fetchIRNodes(c, rolemgmt.Hash)
-	if err != nil {
-		return err
-	}
-
-	if err := fetchBalances(inv, gas.Hash, irList); err != nil {
-		return err
-	}
-	printBalances(cmd, "Inner ring nodes balances:", irList)
-
-	if dumpStorage {
-		if err := printStorageNodeBalances(cmd, inv, nmHash); err != nil {
-			return err
-		}
-	}
-
-	if dumpProxy {
-		if err := printProxyContractBalance(cmd, inv, nnsCs.Hash); err != nil {
-			return err
-		}
-	}
-
-	if dumpAlphabet {
-		if err := printAlphabetContractBalances(cmd, c, inv, len(irList), nnsCs.Hash); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func printStorageNodeBalances(cmd *cobra.Command, inv *invoker.Invoker, nmHash util.Uint160) error {
-	arr, err := unwrap.Array(inv.Call(nmHash, "netmap"))
-	if err != nil {
-		return errors.New("can't fetch the list of storage nodes")
-	}
-
-	snList := make([]accBalancePair, len(arr))
-	for i := range arr {
-		node, ok := arr[i].Value().([]stackitem.Item)
-		if !ok || len(node) == 0 {
-			return errors.New("can't parse the list of storage nodes")
-		}
-		bs, err := node[0].TryBytes()
-		if err != nil {
-			return errors.New("can't parse the list of storage nodes")
-		}
-		var ni netmap.NodeInfo
-		if err := ni.Unmarshal(bs); err != nil {
-			return fmt.Errorf("can't parse the list of storage nodes: %w", err)
-		}
-		pub, err := keys.NewPublicKeyFromBytes(ni.PublicKey(), elliptic.P256())
-		if err != nil {
-			return fmt.Errorf("can't parse storage node public key: %w", err)
-		}
-		snList[i].scriptHash = pub.GetScriptHash()
-	}
-
-	if err := fetchBalances(inv, gas.Hash, snList); err != nil {
-		return err
-	}
-
-	printBalances(cmd, "\nStorage node balances:", snList)
-	return nil
-}
-
-func printProxyContractBalance(cmd *cobra.Command, inv *invoker.Invoker, nnsHash util.Uint160) error {
-	h, err := helper.NNSResolveHash(inv, nnsHash, helper.DomainOf(constants.ProxyContract))
-	if err != nil {
-		return fmt.Errorf("can't get hash of the proxy contract: %w", err)
-	}
-
-	proxyList := []accBalancePair{{scriptHash: h}}
-	if err := fetchBalances(inv, gas.Hash, proxyList); err != nil {
-		return err
-	}
-
-	printBalances(cmd, "\nProxy contract balance:", proxyList)
-	return nil
-}
-
-func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *invoker.Invoker, count int, nnsHash util.Uint160) error {
-	alphaList := make([]accBalancePair, count)
-
-	w := io.NewBufBinWriter()
-	for i := range alphaList {
-		emit.AppCall(w.BinWriter, nnsHash, "resolve", callflag.ReadOnly,
-			helper.GetAlphabetNNSDomain(i),
-			int64(nns.TXT))
-	}
-	assert.NoError(w.Err)
-
-	alphaRes, err := c.InvokeScript(w.Bytes(), nil)
-	if err != nil {
-		return fmt.Errorf("can't fetch info from NNS: %w", err)
-	}
-
-	for i := range alphaList {
-		h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i])
-		if err != nil {
-			return fmt.Errorf("can't fetch the alphabet contract #%d hash: %w", i, err)
-		}
-		alphaList[i].scriptHash = h
-	}
-
-	if err := fetchBalances(inv, gas.Hash, alphaList); err != nil {
-		return err
-	}
-
-	printBalances(cmd, "\nAlphabet contracts balances:", alphaList)
-	return nil
-}
-
-func fetchIRNodes(c helper.Client, desigHash util.Uint160) ([]accBalancePair, error) {
-	inv := invoker.New(c, nil)
-
-	height, err := c.GetBlockCount()
-	if err != nil {
-		return nil, fmt.Errorf("can't get block height: %w", err)
-	}
-
-	arr, err := helper.GetDesignatedByRole(inv, desigHash, noderoles.NeoFSAlphabet, height)
-	if err != nil {
-		return nil, errors.New("can't fetch list of IR nodes from the netmap contract")
-	}
-
-	irList := make([]accBalancePair, len(arr))
-	for i := range arr {
-		irList[i].scriptHash = arr[i].GetScriptHash()
-	}
-	return irList, nil
-}
-
-func printBalances(cmd *cobra.Command, prefix string, accounts []accBalancePair) {
-	useScriptHash, _ := cmd.Flags().GetBool(dumpBalancesUseScriptHashFlag)
-
-	cmd.Println(prefix)
-	for i := range accounts {
-		var addr string
-		if useScriptHash {
-			addr = accounts[i].scriptHash.StringLE()
-		} else {
-			addr = address.Uint160ToString(accounts[i].scriptHash)
-		}
-		cmd.Printf("%s: %s\n", addr, fixedn.ToString(accounts[i].balance, 8))
-	}
-}
-
-func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalancePair) error {
-	w := io.NewBufBinWriter()
-	for i := range accounts {
-		emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
-	}
-	assert.NoError(w.Err)
-
-	res, err := c.Run(w.Bytes())
-	if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
-		return errors.New("can't fetch account balances")
-	}
-
-	for i := range accounts {
-		bal, err := res.Stack[i].TryInteger()
-		if err != nil {
-			return fmt.Errorf("can't parse account balance: %w", err)
-		}
-		accounts[i].balance = bal
-	}
-	return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/root.go b/cmd/frostfs-adm/internal/modules/morph/balance/root.go
deleted file mode 100644
index 3be712367..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/balance/root.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package balance
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var DumpCmd = &cobra.Command{
-	Use:   "dump-balances",
-	Short: "Dump GAS balances",
-	PreRun: func(cmd *cobra.Command, _ []string) {
-		_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-	},
-	RunE: dumpBalances,
-}
-
-func initDumpBalancesCmd() {
-	DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	DumpCmd.Flags().BoolP(dumpBalancesStorageFlag, "s", false, "Dump balances of storage nodes from the current netmap")
-	DumpCmd.Flags().BoolP(dumpBalancesAlphabetFlag, "a", false, "Dump balances of alphabet contracts")
-	DumpCmd.Flags().BoolP(dumpBalancesProxyFlag, "p", false, "Dump balances of the proxy contract")
-	DumpCmd.Flags().Bool(dumpBalancesUseScriptHashFlag, false, "Use script-hash format for addresses")
-}
-
-func init() {
-	initDumpBalancesCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go
deleted file mode 100644
index c17fb62ff..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package config
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-	"text/tabwriter"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	"github.com/nspcc-dev/neo-go/pkg/io"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
-	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-const forceConfigSet = "force"
-
-func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
-	if err != nil {
-		return fmt.Errorf("can't create N3 client: %w", err)
-	}
-
-	inv := invoker.New(c, nil)
-	r := management.NewReader(inv)
-
-	cs, err := helper.GetContractByID(r, 1)
-	if err != nil {
-		return fmt.Errorf("can't get NNS contract info: %w", err)
-	}
-
-	nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.NetmapContract))
-	if err != nil {
-		return fmt.Errorf("can't get netmap contract hash: %w", err)
-	}
-
-	arr, err := unwrap.Array(inv.Call(nmHash, "listConfig"))
-	if err != nil {
-		return errors.New("can't fetch list of network config keys from the netmap contract")
-	}
-
-	buf := bytes.NewBuffer(nil)
-	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
-
-	m, err := helper.ParseConfigFromNetmapContract(arr)
-	if err != nil {
-		return err
-	}
-	for k, v := range m {
-		switch k {
-		case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
-			netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
-			netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
-			netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
-			nbuf := make([]byte, 8)
-			copy(nbuf, v)
-			n := binary.LittleEndian.Uint64(nbuf)
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
-		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
-			if len(v) == 0 || len(v) > 1 {
-				return helper.InvalidConfigValueErr(k)
-			}
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
-		default:
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
-		}
-	}
-
-	_ = tw.Flush()
-	cmd.Print(buf.String())
-
-	return nil
-}
-
-func SetConfigCmd(cmd *cobra.Command, args []string) error {
-	if len(args) == 0 {
-		return errors.New("empty config pairs")
-	}
-
-	wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
-	if err != nil {
-		return fmt.Errorf("can't initialize context: %w", err)
-	}
-
-	r := management.NewReader(wCtx.ReadOnlyInvoker)
-	cs, err := helper.GetContractByID(r, 1)
-	if err != nil {
-		return fmt.Errorf("can't get NNS contract info: %w", err)
-	}
-
-	nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract))
-	if err != nil {
-		return fmt.Errorf("can't get netmap contract hash: %w", err)
-	}
-
-	forceFlag, _ := cmd.Flags().GetBool(forceConfigSet)
-	bw := io.NewBufBinWriter()
-	prm := make(map[string]any)
-	for _, arg := range args {
-		k, v, err := parseConfigPair(arg, forceFlag)
-		if err != nil {
-			return err
-		}
-
-		prm[k] = v
-	}
-
-	if err := validateConfig(prm, forceFlag); err != nil {
-		return err
-	}
-
-	for k, v := range prm {
-		// In NeoFS this is done via Notary contract. Here, however, we can form the
-		// transaction locally. The first `nil` argument is required only for notary
-		// disabled environment which is not supported by that command.
-		emit.AppCall(bw.BinWriter, nmHash, "setConfig", callflag.All, nil, k, v)
-		if bw.Err != nil {
-			return fmt.Errorf("can't form raw transaction: %w", bw.Err)
-		}
-	}
-
-	err = wCtx.SendConsensusTx(bw.Bytes())
-	if err != nil {
-		return err
-	}
-
-	return wCtx.AwaitTx()
-}
-
-const maxECSum = 256
-
-func validateConfig(args map[string]any, forceFlag bool) error {
-	var sumEC int64
-	_, okData := args[netmap.MaxECDataCountConfig]
-	_, okParity := args[netmap.MaxECParityCountConfig]
-	if okData != okParity {
-		return fmt.Errorf("both %s and %s must be present in the configuration",
-			netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig)
-	}
-
-	for k, v := range args {
-		switch k {
-		case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
-			netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
-			netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
-			netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
-			value, ok := v.(int64)
-			if !ok {
-				return fmt.Errorf("%s has an invalid type. Expected type: int", k)
-			}
-
-			if value < 0 {
-				return fmt.Errorf("%s must be >= 0, got %v", k, v)
-			}
-
-			if k == netmap.MaxECDataCountConfig || k == netmap.MaxECParityCountConfig {
-				sumEC += value
-			}
-		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
-			_, ok := v.(bool)
-			if !ok {
-				return fmt.Errorf("%s has an invalid type. Expected type: bool", k)
-			}
-		}
-	}
-
-	if sumEC > maxECSum && !forceFlag {
-		return fmt.Errorf("the sum of %s and %s must be <= %d, got %d",
-			netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig, maxECSum, sumEC)
-	}
-	return nil
-}
-
-func parseConfigPair(kvStr string, force bool) (key string, val any, err error) {
-	k, v, found := strings.Cut(kvStr, "=")
-	if !found {
-		return "", nil, fmt.Errorf("invalid parameter format: must be 'key=val', got: %s", kvStr)
-	}
-
-	key = k
-	valRaw := v
-
-	switch key {
-	case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
-		netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
-		netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
-		netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
-		val, err = strconv.ParseInt(valRaw, 10, 64)
-		if err != nil {
-			err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)
-		}
-	case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
-		val, err = strconv.ParseBool(valRaw)
-		if err != nil {
-			err = fmt.Errorf("could not parse %s's value '%s' as bool: %w", key, valRaw, err)
-		}
-
-	default:
-		if !force {
-			return "", nil, fmt.Errorf(
-				"'%s' key is not well-known, use '--%s' flag if you want to set it anyway",
-				key, forceConfigSet)
-		}
-
-		val = valRaw
-	}
-
-	return
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config_test.go b/cmd/frostfs-adm/internal/modules/morph/config/config_test.go
deleted file mode 100644
index c6d5b2827..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/config/config_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package config
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	"github.com/stretchr/testify/require"
-)
-
-func Test_ValidateConfig(t *testing.T) {
-	testArgs := make(map[string]any)
-
-	testArgs[netmap.MaxECDataCountConfig] = int64(11)
-	require.Error(t, validateConfig(testArgs, false))
-
-	testArgs[netmap.MaxECParityCountConfig] = int64(256)
-	require.Error(t, validateConfig(testArgs, false))
-	require.NoError(t, validateConfig(testArgs, true))
-
-	testArgs[netmap.MaxECParityCountConfig] = int64(-1)
-	require.Error(t, validateConfig(testArgs, false))
-
-	testArgs[netmap.MaxECParityCountConfig] = int64(55)
-	require.NoError(t, validateConfig(testArgs, false))
-
-	testArgs[netmap.HomomorphicHashingDisabledKey] = "1"
-	require.Error(t, validateConfig(testArgs, false))
-
-	testArgs[netmap.HomomorphicHashingDisabledKey] = true
-	require.NoError(t, validateConfig(testArgs, false))
-
-	testArgs["not-well-known-configuration-key"] = "key"
-	require.NoError(t, validateConfig(testArgs, false))
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/root.go b/cmd/frostfs-adm/internal/modules/morph/config/root.go
deleted file mode 100644
index 6b9094de0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/config/root.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package config
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var (
-	SetCmd = &cobra.Command{
-		Use:                   "set-config key1=val1 [key2=val2 ...]",
-		DisableFlagsInUseLine: true,
-		Short:                 "Add/update global config value in the FrostFS network",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-
Args: cobra.MinimumNArgs(1), - RunE: SetConfigCmd, - } - - DumpCmd = &cobra.Command{ - Use: "dump-config", - Short: "Dump FrostFS network config", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: dumpNetworkConfig, - } -) - -func initSetConfigCmd() { - SetCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - SetCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - SetCmd.Flags().Bool(forceConfigSet, false, "Force setting not well-known configuration key") - SetCmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") -} - -func initDumpNetworkConfigCmd() { - DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) -} - -func init() { - initSetConfigCmd() - initDumpNetworkConfigCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go deleted file mode 100644 index be4041a86..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go +++ /dev/null @@ -1,59 +0,0 @@ -package constants - -import "time" - -const ( - ConsensusAccountName = "consensus" - - // MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size - // of the invocation script. - // See: https://github.com/nspcc-dev/neo-go/blob/740488f7f35e367eaa99a71c0a609c315fe2b0fc/pkg/core/transaction/witness.go#L10 - MaxAlphabetNodes = 22 - - SingleAccountName = "single" - CommitteeAccountName = "committee" - - NNSContract = "nns" - FrostfsContract = "frostfs" // not deployed in side-chain. - ProcessingContract = "processing" // not deployed in side-chain. - AlphabetContract = "alphabet" - BalanceContract = "balance" - ContainerContract = "container" - FrostfsIDContract = "frostfsid" - NetmapContract = "netmap" - PolicyContract = "policy" - ProxyContract = "proxy" - - ContractWalletFilename = "contract.json" - ContractWalletPasswordKey = "contract" - - FrostfsOpsEmail = "ops@frostfs.info" - NNSRefreshDefVal = int64(3600) - NNSRetryDefVal = int64(600) - NNSTtlDefVal = int64(3600) - - DefaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second - - DeployMethodName = "deploy" - UpdateMethodName = "update" - - TestContractPassword = "grouppass" -) - -var ( - ContractList = []string{ - BalanceContract, - ContainerContract, - FrostfsIDContract, - NetmapContract, - PolicyContract, - ProxyContract, - } - - FullContractList = append([]string{ - FrostfsContract, - ProcessingContract, - NNSContract, - AlphabetContract, - }, ContractList...) 
-) diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go deleted file mode 100644 index 79685f111..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ /dev/null @@ -1,386 +0,0 @@ -package container - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "slices" - "sort" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var errInvalidContainerResponse = errors.New("invalid response from container contract") - -func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker) (util.Uint160, error) { - s, err := cmd.Flags().GetString(containerContractFlag) - var ch util.Uint160 - if err == nil { - ch, err = util.Uint160DecodeStringLE(s) - } - if err != nil { - r := management.NewReader(inv) - nnsCs, err := helper.GetContractByID(r, 1) - if err != nil { - return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err) - } - ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.ContainerContract)) - if err != nil { - return util.Uint160{}, err - } - } - return ch, nil -} - -func iterateContainerList(inv *invoker.Invoker, ch util.Uint160, f func([]byte) error) error { - sid, r, err := unwrap.SessionIterator(inv.Call(ch, "containersOf", "")) - if err != nil { - return fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } - // Nothing bad, except live session on the server, do not report to the user. 
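- // "containersOf" opens a server-side iterator session: the RPC node keeps the - // iterator alive and returns items page by page. The loop below drains it with - // TraverseIterator; a non-positive batch size is assumed to request the node's - // default page size, and an empty page marks the end of the iteration.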
- defer func() { _ = inv.TerminateSession(sid) }() - - items, err := inv.TraverseIterator(sid, &r, 0) - for err == nil && len(items) != 0 { - for j := range items { - b, err := items[j].TryBytes() - if err != nil { - return fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } - if err := f(b); err != nil { - return err - } - } - items, err = inv.TraverseIterator(sid, &r, 0) - } - return err -} - -func dumpContainers(cmd *cobra.Command, _ []string) error { - filename, err := cmd.Flags().GetString(containerDumpFlag) - if err != nil { - return fmt.Errorf("invalid filename: %w", err) - } - - c, err := helper.NewRemoteClient(viper.GetViper()) - if err != nil { - return fmt.Errorf("can't create N3 client: %w", err) - } - - inv := invoker.New(c, nil) - - ch, err := getContainerContractHash(cmd, inv) - if err != nil { - return fmt.Errorf("unable to get contract hash: %w", err) - } - - isOK, err := getCIDFilterFunc(cmd) - if err != nil { - return err - } - - f, err := os.OpenFile(filename, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0o660) - if err != nil { - return err - } - defer f.Close() - - _, err = f.Write([]byte{'['}) - if err != nil { - return err - } - - written := 0 - enc := json.NewEncoder(f) - bw := io.NewBufBinWriter() - iterErr := iterateContainerList(inv, ch, func(id []byte) error { - if !isOK(id) { - return nil - } - - cnt, err := dumpSingleContainer(bw, ch, inv, id) - if err != nil { - return err - } - - // Writing directly to the file is ok, because json.Encoder does no internal buffering. - if written != 0 { - _, err = f.Write([]byte{','}) - if err != nil { - return err - } - } - - written++ - return enc.Encode(cnt) - }) - if iterErr != nil { - return iterErr - } - - _, err = f.Write([]byte{']'}) - return err -} - -func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) { - bw.Reset() - emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id) - res, err := inv.Run(bw.Bytes()) - if err != nil { - return nil, fmt.Errorf("can't get container info: %w", err) - } - if len(res.Stack) != 1 { - return nil, fmt.Errorf("%w: expected 1 item on stack", errInvalidContainerResponse) - } - - cnt := new(Container) - err = cnt.FromStackItem(res.Stack[0]) - if err != nil { - return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } - - return cnt, nil -} - -func listContainers(cmd *cobra.Command, _ []string) error { - c, err := helper.NewRemoteClient(viper.GetViper()) - if err != nil { - return fmt.Errorf("can't create N3 client: %w", err) - } - - inv := invoker.New(c, nil) - - ch, err := getContainerContractHash(cmd, inv) - if err != nil { - return fmt.Errorf("unable to get contract hash: %w", err) - } - - return iterateContainerList(inv, ch, func(id []byte) error { - var idCnr cid.ID - err = idCnr.Decode(id) - if err != nil { - return fmt.Errorf("unable to decode container id: %w", err) - } - cmd.Println(idCnr) - return nil - }) -} - -func restoreContainers(cmd *cobra.Command, _ []string) error { - filename, err := cmd.Flags().GetString(containerDumpFlag) - if err != nil { - return fmt.Errorf("invalid filename: %w", err) - } - - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return err - } - defer wCtx.Close() - - containers, err := parseContainers(filename) - if err != nil { - return err - } - - ch, err := fetchContainerContractHash(wCtx) - if err != nil { - return err - } - - isOK, err := getCIDFilterFunc(cmd) - if err != nil { - return err - } - - err = 
restoreOrPutContainers(containers, isOK, cmd, wCtx, ch) - if err != nil { - return err - } - - return wCtx.AwaitTx() -} - -func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd *cobra.Command, wCtx *helper.InitializeContext, ch util.Uint160) error { - bw := io.NewBufBinWriter() - for _, cnt := range containers { - hv := hash.Sha256(cnt.Value) - if !isOK(hv[:]) { - continue - } - bw.Reset() - restored, err := isContainerRestored(cmd, wCtx, ch, bw, hv) - if err != nil { - return err - } - if restored { - continue - } - - bw.Reset() - - putContainer(bw, ch, cnt) - - assert.NoError(bw.Err) - - if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { - return err - } - } - return nil -} - -func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) { - emit.AppCall(bw.BinWriter, ch, "put", callflag.All, - cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token) -} - -func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) { - emit.AppCall(bw.BinWriter, containerHash, "get", callflag.All, hashValue.BytesBE()) - res, err := wCtx.Client.InvokeScript(bw.Bytes(), nil) - if err != nil { - return false, fmt.Errorf("can't check if container is already restored: %w", err) - } - if len(res.Stack) == 0 { - return false, errors.New("empty stack") - } - - old := new(Container) - if err := old.FromStackItem(res.Stack[0]); err != nil { - return false, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } - if len(old.Value) != 0 { - var id cid.ID - id.SetSHA256(hashValue) - cmd.Printf("Container %s is already deployed.\n", id) - return true, nil - } - - return false, nil -} - -func parseContainers(filename string) ([]Container, error) { - data, err := os.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("can't read dump file: %w", err) - } - - var containers []Container - err = json.Unmarshal(data, &containers) - if err != nil { - return nil, fmt.Errorf("can't parse dump file: %w", err) - } - return containers, nil -} - -func fetchContainerContractHash(wCtx *helper.InitializeContext) (util.Uint160, error) { - r := management.NewReader(wCtx.ReadOnlyInvoker) - nnsCs, err := helper.GetContractByID(r, 1) - if err != nil { - return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err) - } - - ch, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, nnsCs.Hash, helper.DomainOf(constants.ContainerContract)) - if err != nil { - return util.Uint160{}, fmt.Errorf("can't fetch container contract hash: %w", err) - } - return ch, nil -} - -// Container represents container struct in contract storage. -type Container struct { - Value []byte `json:"value"` - Signature []byte `json:"signature"` - PublicKey []byte `json:"public_key"` - Token []byte `json:"token"` -} - -// ToStackItem implements stackitem.Convertible. -func (c *Container) ToStackItem() (stackitem.Item, error) { - return stackitem.NewStruct([]stackitem.Item{ - stackitem.NewByteArray(c.Value), - stackitem.NewByteArray(c.Signature), - stackitem.NewByteArray(c.PublicKey), - stackitem.NewByteArray(c.Token), - }), nil -} - -// FromStackItem implements stackitem.Convertible. 
-func (c *Container) FromStackItem(item stackitem.Item) error { - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) != 4 { - return errors.New("invalid stack item type") - } - - value, err := arr[0].TryBytes() - if err != nil { - return errors.New("invalid container value") - } - - sig, err := arr[1].TryBytes() - if err != nil { - return errors.New("invalid container signature") - } - - pub, err := arr[2].TryBytes() - if err != nil { - return errors.New("invalid container public key") - } - - tok, err := arr[3].TryBytes() - if err != nil { - return errors.New("invalid container token") - } - - c.Value = value - c.Signature = sig - c.PublicKey = pub - c.Token = tok - return nil -} - -// getCIDFilterFunc returns filtering function for container IDs. -// Raw byte slices are used because it works with structures returned -// from contract. -func getCIDFilterFunc(cmd *cobra.Command) (func([]byte) bool, error) { - rawIDs, err := cmd.Flags().GetStringSlice(containerIDsFlag) - if err != nil { - return nil, err - } - if len(rawIDs) == 0 { - return func([]byte) bool { return true }, nil - } - - for i := range rawIDs { - err := new(cid.ID).DecodeString(rawIDs[i]) - if err != nil { - return nil, fmt.Errorf("can't parse CID %s: %w", rawIDs[i], err) - } - } - sort.Strings(rawIDs) - return func(rawID []byte) bool { - var v [32]byte - copy(v[:], rawID) - - var id cid.ID - id.SetSHA256(v) - idStr := id.EncodeToString() - _, found := slices.BinarySearch(rawIDs, idStr) - return found - }, nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/container/root.go b/cmd/frostfs-adm/internal/modules/morph/container/root.go deleted file mode 100644 index 2b314ab09..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/container/root.go +++ /dev/null @@ -1,68 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - containerDumpFlag = "dump" - containerContractFlag = "container-contract" - containerIDsFlag = "cid" -) - -var ( - DumpCmd = &cobra.Command{ - Use: "dump-containers", - Short: "Dump FrostFS containers to file", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: dumpContainers, - } - - RestoreCmd = &cobra.Command{ - Use: "restore-containers", - Short: "Restore FrostFS containers from file", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: restoreContainers, - } - - ListCmd = &cobra.Command{ - Use: "list-containers", - Short: "List FrostFS containers", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: listContainers, - } -) - -func initListContainersCmd() { - ListCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - ListCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)") -} - -func initRestoreContainersCmd() { - RestoreCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - RestoreCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - 
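- // A hypothetical end-to-end run (the command prefix, endpoint and file name - // are illustrative, not taken from this repository's docs): - // - // frostfs-adm morph dump-containers -r http://morph-node:30333 --dump containers.json - // frostfs-adm morph restore-containers -r http://morph-node:30333 \ - // --alphabet-wallets ./alphabet --dump containers.json --cid <cid>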
RestoreCmd.Flags().String(containerDumpFlag, "", "File to restore containers from") - RestoreCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to restore") -} - -func initDumpContainersCmd() { - DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - DumpCmd.Flags().String(containerDumpFlag, "", "File where to save dumped containers") - DumpCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)") - DumpCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to dump") -} - -func init() { - initDumpContainersCmd() - initRestoreContainersCmd() - initListContainersCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go deleted file mode 100644 index 543b5fcb3..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ /dev/null @@ -1,235 +0,0 @@ -package contract - -import ( - "encoding/json" - "fmt" - "os" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "github.com/nspcc-dev/neo-go/cli/cmdargs" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/services/rpcsrv/params" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - contractPathFlag = "contract" - updateFlag = "update" -) - -var DeployCmd = &cobra.Command{ - Use: "deploy", - Short: "Deploy additional smart-contracts", - Long: `Deploy additional smart-contract which are not related to core. -All contracts are deployed by the committee, so access to the alphabet wallets is required. -Optionally, arguments can be provided to be passed to a contract's _deploy function. -The syntax is the same as for 'neo-go contract testinvokefunction' command. -Compiled contract file name must contain '_contract.nef' suffix. -Contract's manifest file name must be 'config.json'. 
-NNS name is taken by stripping '_contract.nef' from the NEF file (similar to frostfs contracts).`, - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: deployContractCmd, -} - -func init() { - ff := DeployCmd.Flags() - - ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - _ = DeployCmd.MarkFlagFilename(commonflags.AlphabetWalletsFlag) - - ff.StringP(commonflags.EndpointFlag, "r", "", commonflags.EndpointFlagDesc) - ff.String(contractPathFlag, "", "Path to the contract directory") - _ = DeployCmd.MarkFlagFilename(contractPathFlag) - - ff.Bool(updateFlag, false, "Update an existing contract") - ff.String(commonflags.CustomZoneFlag, "frostfs", "Custom zone for NNS") -} - -func deployContractCmd(cmd *cobra.Command, args []string) error { - v := viper.GetViper() - c, err := helper.NewInitializeContext(cmd, v) - if err != nil { - return fmt.Errorf("initialization error: %w", err) - } - defer c.Close() - - ctrPath, _ := cmd.Flags().GetString(contractPathFlag) - ctrName, err := probeContractName(ctrPath) - if err != nil { - return err - } - - cs, err := helper.ReadContract(ctrPath, ctrName) - if err != nil { - return err - } - - r := management.NewReader(c.ReadOnlyInvoker) - nnsCs, err := helper.GetContractByID(r, 1) - if err != nil { - return fmt.Errorf("can't fetch NNS contract state: %w", err) - } - - callHash := management.Hash - method := constants.DeployMethodName - zone, _ := cmd.Flags().GetString(commonflags.CustomZoneFlag) - domain := ctrName + "." + zone - isUpdate, _ := cmd.Flags().GetBool(updateFlag) - if isUpdate { - cs.Hash, err = helper.NNSResolveHash(c.ReadOnlyInvoker, nnsCs.Hash, domain) - if err != nil { - return fmt.Errorf("can't fetch contract hash from NNS: %w", err) - } - callHash = cs.Hash - method = constants.UpdateMethodName - } else { - cs.Hash = state.CreateContractHash( - c.CommitteeAcc.Contract.ScriptHash(), - cs.NEF.Checksum, - cs.Manifest.Name) - } - - writer := io.NewBufBinWriter() - if err := emitDeploymentArguments(writer.BinWriter, args); err != nil { - return err - } - emit.Bytes(writer.BinWriter, cs.RawManifest) - emit.Bytes(writer.BinWriter, cs.RawNEF) - emit.Int(writer.BinWriter, 3) - emit.Opcodes(writer.BinWriter, opcode.PACK) - emit.AppCallNoArgs(writer.BinWriter, callHash, method, callflag.All) - emit.Opcodes(writer.BinWriter, opcode.DROP) // contract state on stack - if !isUpdate { - err := registerNNS(nnsCs, c, zone, domain, cs, writer) - if err != nil { - return err - } - } - - assert.NoError(writer.Err, "can't create deployment script") - - if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { - return err - } - return c.AwaitTx() -} - -func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string, domain string, cs *helper.ContractState, writer *io.BufBinWriter) error { - bw := io.NewBufBinWriter() - emit.Instruction(bw.BinWriter, opcode.INITSSLOT, []byte{1}) - emit.AppCall(bw.BinWriter, nnsCs.Hash, "getPrice", callflag.All) - emit.Opcodes(bw.BinWriter, opcode.STSFLD0) - emit.AppCall(bw.BinWriter, nnsCs.Hash, "setPrice", callflag.All, 1) - - start := bw.Len() - needRecord := false - - ok, err := c.NNSRootRegistered(nnsCs.Hash, zone) - if err != nil { - return err - } else if !ok { - needRecord = true - - emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All, - zone, 
c.CommitteeAcc.Contract.ScriptHash(), - constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal, - int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - - emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All, - domain, c.CommitteeAcc.Contract.ScriptHash(), - constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal, - int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - } else { - s, ok, err := c.NNSRegisterDomainScript(nnsCs.Hash, cs.Hash, domain) - if err != nil { - return err - } - needRecord = !ok - if len(s) != 0 { - bw.WriteBytes(s) - } - } - if needRecord { - emit.AppCall(bw.BinWriter, nnsCs.Hash, "deleteRecords", callflag.All, domain, int64(nns.TXT)) - emit.AppCall(bw.BinWriter, nnsCs.Hash, "addRecord", callflag.All, - domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) - } - - assert.NoError(bw.Err, "can't create deployment script") - if bw.Len() != start { - writer.WriteBytes(bw.Bytes()) - emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) - emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) - - if needRecord { - c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE()) - } - } - return nil -} - -func emitDeploymentArguments(w *io.BinWriter, args []string) error { - _, ps, err := cmdargs.ParseParams(args, true) - if err != nil { - return err - } - - if len(ps) == 0 { - emit.Opcodes(w, opcode.NEWARRAY0) - return nil - } - - if len(ps) != 1 { - return fmt.Errorf("at most one argument is expected for deploy, got %d", len(ps)) - } - - // We could emit this directly, but a round-trip through JSON is more robust. - // This is a CLI, so optimizing the conversion is not worth the effort. 
- data, err := json.Marshal(ps) - if err != nil { - return err - } - - var pp params.Params - if err := json.Unmarshal(data, &pp); err != nil { - return err - } - return params.ExpandArrayIntoScript(w, pp) -} - -func probeContractName(ctrPath string) (string, error) { - ds, err := os.ReadDir(ctrPath) - if err != nil { - return "", fmt.Errorf("can't read directory: %w", err) - } - - var ctrName string - for i := range ds { - if strings.HasSuffix(ds[i].Name(), "_contract.nef") { - ctrName = strings.TrimSuffix(ds[i].Name(), "_contract.nef") - break - } - } - - if ctrName == "" { - return "", fmt.Errorf("can't find any NEF files in %s", ctrPath) - } - return ctrName, nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go deleted file mode 100644 index fde58fd2b..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ /dev/null @@ -1,263 +0,0 @@ -package contract - -import ( - "bytes" - "errors" - "fmt" - "strings" - "text/tabwriter" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const lastGlagoliticLetter = 41 - -type contractDumpInfo struct { - hash util.Uint160 - name string - version string -} - -func dumpContractHashes(cmd *cobra.Command, _ []string) error { - c, err := helper.NewRemoteClient(viper.GetViper()) - if err != nil { - return fmt.Errorf("can't create N3 client: %w", err) - } - - r := management.NewReader(invoker.New(c, nil)) - cs, err := helper.GetContractByID(r, 1) - if err != nil { - return err - } - - zone, _ := cmd.Flags().GetString(commonflags.CustomZoneFlag) - if zone != "" { - return dumpCustomZoneHashes(cmd, cs.Hash, zone, c) - } - - infos := []contractDumpInfo{{name: constants.NNSContract, hash: cs.Hash}} - - irSize := 0 - for ; irSize < lastGlagoliticLetter; irSize++ { - ok, err := helper.NNSIsAvailable(c, cs.Hash, helper.GetAlphabetNNSDomain(irSize)) - if err != nil { - return err - } else if ok { - break - } - } - - bw := io.NewBufBinWriter() - - if irSize != 0 { - bw.Reset() - for i := range irSize { - emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly, - helper.GetAlphabetNNSDomain(i), - int64(nns.TXT)) - } - - alphaRes, err := c.InvokeScript(bw.Bytes(), nil) - if err != nil { - return fmt.Errorf("can't fetch info from NNS: %w", err) - } - - for i := range irSize { - info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)} - if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil { - info.hash = h - } - infos = 
append(infos, info) - } - } - - for _, ctrName := range constants.ContractList { - bw.Reset() - emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly, - helper.DomainOf(ctrName), int64(nns.TXT)) - - res, err := c.InvokeScript(bw.Bytes(), nil) - if err != nil { - return fmt.Errorf("can't fetch info from NNS: %w", err) - } - - info := contractDumpInfo{name: ctrName} - if len(res.Stack) != 0 { - if h, err := helper.ParseNNSResolveResult(res.Stack[0]); err == nil { - info.hash = h - } - } - infos = append(infos, info) - } - - fillContractVersion(cmd, c, infos) - printContractInfo(cmd, infos) - - return nil -} - -func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string, c helper.Client) error { - const nnsMaxTokens = 100 - - inv := invoker.New(c, nil) - - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - - var infos []contractDumpInfo - processItem := func(item stackitem.Item) { - bs, err := item.TryBytes() - if err != nil { - cmd.PrintErrf("Invalid NNS record: %v\n", err) - return - } - - if !bytes.HasSuffix(bs, []byte(zone)) || bytes.HasPrefix(bs, []byte(morphClient.NNSGroupKeyName)) { - // Related https://github.com/nspcc-dev/neofs-contract/issues/316. - return - } - - h, err := helper.NNSResolveHash(inv, nnsHash, string(bs)) - if err != nil { - cmd.PrintErrf("Could not resolve name %s: %v\n", string(bs), err) - return - } - - infos = append(infos, contractDumpInfo{ - hash: h, - name: strings.TrimSuffix(string(bs), zone), - }) - } - - script, err := smartcontract.CreateCallAndPrefetchIteratorScript(nnsHash, "tokens", nnsMaxTokens) - if err != nil { - return fmt.Errorf("create prefetch script: %w", err) - } - - arr, sessionID, iter, err := unwrap.ArrayAndSessionIterator(inv.Run(script)) - if err != nil { - if errors.Is(err, unwrap.ErrNoSessionID) { - items, err := unwrap.Array(inv.CallAndExpandIterator(nnsHash, "tokens", nnsMaxTokens)) - if err != nil { - return fmt.Errorf("can't get a list of NNS domains: %w", err) - } - if len(items) == nnsMaxTokens { - cmd.PrintErrln("Provided RPC endpoint doesn't support sessions, some hashes might be lost.") - } - for i := range items { - processItem(items[i]) - } - } else { - return err - } - } else { - for i := range arr { - processItem(arr[i]) - } - - defer func() { - _ = inv.TerminateSession(sessionID) - }() - - items, err := inv.TraverseIterator(sessionID, &iter, 0) - for err == nil && len(items) != 0 { - for i := range items { - processItem(items[i]) - } - items, err = inv.TraverseIterator(sessionID, &iter, 0) - } - if err != nil { - return fmt.Errorf("error during NNS domains iteration: %w", err) - } - } - - fillContractVersion(cmd, c, infos) - printContractInfo(cmd, infos) - - return nil -} - -func parseContractVersion(item stackitem.Item) string { - bi, err := item.TryInteger() - if err != nil || bi.Sign() == 0 || !bi.IsInt64() { - return "unknown" - } - - v := bi.Int64() - major := v / 1_000_000 - minor := (v % 1_000_000) / 1000 - patch := v % 1_000 - return fmt.Sprintf("v%d.%d.%d", major, minor, patch) -} - -func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { - if len(infos) == 0 { - return - } - - buf := bytes.NewBuffer(nil) - tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - for _, info := range infos { - if info.version == "" { - info.version = "unknown" - } - _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE())) - } - _ = tw.Flush() - - cmd.Print(buf.String()) -} - -func fillContractVersion(cmd *cobra.Command, c helper.Client, 
infos []contractDumpInfo) { - bw := io.NewBufBinWriter() - sub := io.NewBufBinWriter() - for i := range infos { - if infos[i].hash.Equals(util.Uint160{}) { - emit.Int(bw.BinWriter, 0) - } else { - sub.Reset() - emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - assert.NoError(sub.Err, "can't create version script") - - script := sub.Bytes() - emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.WriteBytes(script) - emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) - emit.Opcodes(bw.BinWriter, opcode.PUSH0) - } - } - emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - assert.NoError(bw.Err, "can't create version script") - - res, err := c.InvokeScript(bw.Bytes(), nil) - if err != nil { - cmd.Printf("Can't fetch version from NNS: %v\n", err) - return - } - - if res.State == vmstate.Halt.String() { - for i := range res.Stack { - infos[i].version = parseContractVersion(res.Stack[i]) - } - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/root.go b/cmd/frostfs-adm/internal/modules/morph/contract/root.go deleted file mode 100644 index 9bad2bd66..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/contract/root.go +++ /dev/null @@ -1,45 +0,0 @@ -package contract - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - DumpHashesCmd = &cobra.Command{ - Use: "dump-hashes", - Short: "Dump deployed contract hashes", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: dumpContractHashes, - } - UpdateCmd = &cobra.Command{ - Use: "update-contracts", - Short: "Update FrostFS contracts", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: updateContracts, - } -) - -func initDumpContractHashesCmd() { - DumpHashesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - DumpHashesCmd.Flags().String(commonflags.CustomZoneFlag, "", "Custom zone to search.") -} - -func initUpdateContractsCmd() { - UpdateCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - UpdateCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - UpdateCmd.Flags().String(commonflags.ContractsInitFlag, "", commonflags.ContractsInitFlagDesc) - UpdateCmd.Flags().String(commonflags.ContractsURLFlag, "", commonflags.ContractsURLFlagDesc) - UpdateCmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag) -} - -func init() { - initDumpContractHashesCmd() - initUpdateContractsCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/update.go b/cmd/frostfs-adm/internal/modules/morph/contract/update.go deleted file mode 100644 index 109849aab..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/contract/update.go +++ /dev/null @@ -1,197 +0,0 @@ -package contract - -import ( - "encoding/hex" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/common" - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - io2 "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - neoUtil "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var errMissingNNSRecord = errors.New("missing NNS record") - -func updateContracts(cmd *cobra.Command, _ []string) error { - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("initialization error: %w", err) - } - - if err := helper.DeployNNS(wCtx, constants.UpdateMethodName); err != nil { - return err - } - - return updateContractsInternal(wCtx) -} - -func updateContractsInternal(c *helper.InitializeContext) error { - alphaCs := c.GetContract(constants.AlphabetContract) - - nnsCs, err := c.NNSContractState() - if err != nil { - return err - } - nnsHash := nnsCs.Hash - - w := io2.NewBufBinWriter() - - // Update script size for a single-node committee is close to the maximum allowed size of 65535. - // Because of this we want to reuse alphabet contract NEF and manifest for different updates. - // The generated script is as following. - // 1. Initialize static slot for alphabet NEF. - // 2. Store NEF into the static slot. - // 3. Push parameters for each alphabet contract on stack. - // 4. Add contract group to the manifest. - // 5. For each alphabet contract, invoke `update` using parameters on stack and - // NEF from step 2 and manifest from step 4. - emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1}) - emit.Bytes(w.BinWriter, alphaCs.RawNEF) - emit.Opcodes(w.BinWriter, opcode.STSFLD0) - - keysParam, err := deployAlphabetAccounts(c, nnsHash, w, alphaCs) - if err != nil { - return err - } - - w.Reset() - - if err = deployOrUpdateContracts(c, w, nnsHash, keysParam); err != nil { - return err - } - - groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey() - _, _, err = c.EmitUpdateNNSGroupScript(w, nnsHash, groupKey) - if err != nil { - return err - } - c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes())) - - emit.Opcodes(w.BinWriter, opcode.LDSFLD0) - emit.Int(w.BinWriter, 1) - emit.Opcodes(w.BinWriter, opcode.PACK) - emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - - if err := c.SendCommitteeTx(w.Bytes(), false); err != nil { - return err - } - return c.AwaitTx() -} - -func deployAlphabetAccounts(c *helper.InitializeContext, nnsHash neoUtil.Uint160, w *io2.BufBinWriter, alphaCs *helper.ContractState) ([]any, error) { - var keysParam []any - - baseGroups := alphaCs.Manifest.Groups - - // alphabet contracts should be deployed by individual nodes to get different hashes. - for i, acc := range c.Accounts { - ctrHash, err := helper.NNSResolveHash(c.ReadOnlyInvoker, nnsHash, helper.GetAlphabetNNSDomain(i)) - if err != nil { - return nil, fmt.Errorf("can't resolve hash for contract update: %w", err) - } - - keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes()) - - params := c.GetAlphabetDeployItems(i, len(c.Wallets)) - emit.Array(w.BinWriter, params...) 
- - alphaCs.Manifest.Groups = baseGroups - err = helper.AddManifestGroup(c.ContractWallet, ctrHash, alphaCs) - if err != nil { - return nil, fmt.Errorf("can't sign manifest group: %v", err) - } - - emit.Bytes(w.BinWriter, alphaCs.RawManifest) - emit.Opcodes(w.BinWriter, opcode.LDSFLD0) - emit.Int(w.BinWriter, 3) - emit.Opcodes(w.BinWriter, opcode.PACK) - emit.AppCallNoArgs(w.BinWriter, ctrHash, constants.UpdateMethodName, callflag.All) - } - if err := c.SendCommitteeTx(w.Bytes(), false); err != nil { - if !strings.Contains(err.Error(), common.ErrAlreadyUpdated) { - return nil, err - } - c.Command.Println("Alphabet contracts are already updated.") - } - - return keysParam, nil -} - -func deployOrUpdateContracts(c *helper.InitializeContext, w *io2.BufBinWriter, nnsHash neoUtil.Uint160, keysParam []any) error { - emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1}) - emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All) - emit.Opcodes(w.BinWriter, opcode.STSFLD0) - emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1) - - for _, ctrName := range constants.ContractList { - cs := c.GetContract(ctrName) - - method := constants.UpdateMethodName - ctrHash, err := helper.NNSResolveHash(c.ReadOnlyInvoker, nnsHash, helper.DomainOf(ctrName)) - if err != nil { - if errors.Is(err, errMissingNNSRecord) { - // if contract not found we deploy it instead of update - method = constants.DeployMethodName - } else { - return fmt.Errorf("can't resolve hash for contract update: %w", err) - } - } - - err = helper.AddManifestGroup(c.ContractWallet, ctrHash, cs) - if err != nil { - return fmt.Errorf("can't sign manifest group: %v", err) - } - - invokeHash := management.Hash - if method == constants.UpdateMethodName { - invokeHash = ctrHash - } - - args, err := helper.GetContractDeployData(c, ctrName, keysParam, constants.UpdateMethodName) - if err != nil { - return fmt.Errorf("%s: getting update params: %v", ctrName, err) - } - params := helper.GetContractDeployParameters(cs, args) - res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...) 
- if err != nil { - if method != constants.UpdateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) { - return fmt.Errorf("deploy contract: %w", err) - } - c.Command.Printf("%s contract is already updated.\n", ctrName) - continue - } - - w.WriteBytes(res.Script) - - if method == constants.DeployMethodName { - // same actions are done in InitializeContext.setNNS, can be unified - domain := ctrName + ".frostfs" - script, ok, err := c.NNSRegisterDomainScript(nnsHash, cs.Hash, domain) - if err != nil { - return err - } - if !ok { - w.WriteBytes(script) - emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT)) - emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All, - domain, int64(nns.TXT), cs.Hash.StringLE()) - emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All, - domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) - } - c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE()) - } - } - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go deleted file mode 100644 index 4046e85e3..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go +++ /dev/null @@ -1,83 +0,0 @@ -package frostfsid - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - frostfsidAddSubjectKeyCmd = &cobra.Command{ - Use: "add-subject-key", - Short: "Add a public key to the subject in frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidAddSubjectKey, - } - frostfsidRemoveSubjectKeyCmd = &cobra.Command{ - Use: "remove-subject-key", - Short: "Remove a public key from the subject in frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidRemoveSubjectKey, - } -) - -func initFrostfsIDAddSubjectKeyCmd() { - Cmd.AddCommand(frostfsidAddSubjectKeyCmd) - - ff := frostfsidAddSubjectKeyCmd.Flags() - ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - - ff.String(subjectAddressFlag, "", "Subject address") - _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) - - ff.String(subjectKeyFlag, "", "Public key to add") - _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) -} - -func initFrostfsIDRemoveSubjectKeyCmd() { - Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd) - - ff := frostfsidRemoveSubjectKeyCmd.Flags() - ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - - ff.String(subjectAddressFlag, "", "Subject address") - _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) - - ff.String(subjectKeyFlag, "", "Public key to remove") - _ = 
frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) -} - -func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) { - addr := getFrostfsIDSubjectAddress(cmd) - pub := getFrostfsIDSubjectKey(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "add subject key: %w", err) -} - -func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) { - addr := getFrostfsIDSubjectAddress(cmd) - pub := getFrostfsIDSubjectKey(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "remove subject key: %w", err) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go deleted file mode 100644 index 7f777db98..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ /dev/null @@ -1,629 +0,0 @@ -package frostfsid - -import ( - "encoding/hex" - "errors" - "fmt" - "math/big" - "sort" - - frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - frostfsidrpclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const iteratorBatchSize = 1 - -const ( - namespaceFlag = "namespace" - subjectNameFlag = "subject-name" - subjectKeyFlag = "subject-key" - subjectAddressFlag = "subject-address" - extendedFlag = "extended" - groupNameFlag = "group-name" - groupIDFlag = "group-id" - - rootNamespacePlaceholder = "" - - keyFlag = "key" - keyDescFlag = "Key for storing a value in the subject's KV storage" - valueFlag = "value" - valueDescFlag = "Value to be stored in the subject's KV storage" -) - -var ( - Cmd = &cobra.Command{ - Use: "frostfsid", - Short: "Section for frostfsid interactions commands", - } - - frostfsidCreateNamespaceCmd = &cobra.Command{ - Use: "create-namespace", - Short: "Create new namespace in frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidCreateNamespace, - } - - frostfsidListNamespacesCmd = &cobra.Command{ - Use: "list-namespaces", - Short: "List all namespaces in frostfsid", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = 
viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidListNamespaces, - } - - frostfsidCreateSubjectCmd = &cobra.Command{ - Use: "create-subject", - Short: "Create subject in frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidCreateSubject, - } - - frostfsidDeleteSubjectCmd = &cobra.Command{ - Use: "delete-subject", - Short: "Delete subject from frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidDeleteSubject, - } - - frostfsidListSubjectsCmd = &cobra.Command{ - Use: "list-subjects", - Short: "List subjects in namespace", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidListSubjects, - } - - frostfsidCreateGroupCmd = &cobra.Command{ - Use: "create-group", - Short: "Create group in frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidCreateGroup, - } - - frostfsidDeleteGroupCmd = &cobra.Command{ - Use: "delete-group", - Short: "Delete group from frostfsid contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidDeleteGroup, - } - - frostfsidListGroupsCmd = &cobra.Command{ - Use: "list-groups", - Short: "List groups in namespace", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidListGroups, - } - - frostfsidAddSubjectToGroupCmd = &cobra.Command{ - Use: "add-subject-to-group", - Short: "Add subject to group", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidAddSubjectToGroup, - } - - frostfsidRemoveSubjectFromGroupCmd = &cobra.Command{ - Use: "remove-subject-from-group", - Short: "Remove subject from group", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidRemoveSubjectFromGroup, - } - - frostfsidListGroupSubjectsCmd = &cobra.Command{ - Use: "list-group-subjects", - Short: "List subjects in group", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidListGroupSubjects, - } - - frostfsidSetKVCmd = &cobra.Command{ - Use: "set-kv", - 
Short: "Store a key-value pair in the subject's KV storage", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidSetKV, - } - frostfsidDeleteKVCmd = &cobra.Command{ - Use: "delete-kv", - Short: "Delete a value from the subject's KV storage", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidDeleteKV, - } -) - -func initFrostfsIDCreateNamespaceCmd() { - Cmd.AddCommand(frostfsidCreateNamespaceCmd) - frostfsidCreateNamespaceCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidCreateNamespaceCmd.Flags().String(namespaceFlag, "", "Namespace name to create") - frostfsidCreateNamespaceCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - _ = frostfsidCreateNamespaceCmd.MarkFlagRequired(namespaceFlag) -} - -func initFrostfsIDListNamespacesCmd() { - Cmd.AddCommand(frostfsidListNamespacesCmd) - frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) -} - -func initFrostfsIDCreateSubjectCmd() { - Cmd.AddCommand(frostfsidCreateSubjectCmd) - frostfsidCreateSubjectCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidCreateSubjectCmd.Flags().String(namespaceFlag, "", "Namespace where create subject") - frostfsidCreateSubjectCmd.Flags().String(subjectNameFlag, "", "Subject name, must be unique in namespace") - frostfsidCreateSubjectCmd.Flags().String(subjectKeyFlag, "", "Subject hex-encoded public key") - frostfsidCreateSubjectCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initFrostfsIDDeleteSubjectCmd() { - Cmd.AddCommand(frostfsidDeleteSubjectCmd) - frostfsidDeleteSubjectCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidDeleteSubjectCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidDeleteSubjectCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initFrostfsIDListSubjectsCmd() { - Cmd.AddCommand(frostfsidListSubjectsCmd) - frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") - frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") -} - -func initFrostfsIDCreateGroupCmd() { - Cmd.AddCommand(frostfsidCreateGroupCmd) - frostfsidCreateGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidCreateGroupCmd.Flags().String(namespaceFlag, "", "Namespace where create group") - frostfsidCreateGroupCmd.Flags().String(groupNameFlag, "", "Group name, must be unique in namespace") - frostfsidCreateGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - _ = frostfsidCreateGroupCmd.MarkFlagRequired(groupNameFlag) -} - -func initFrostfsIDDeleteGroupCmd() { - Cmd.AddCommand(frostfsidDeleteGroupCmd) - frostfsidDeleteGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", 
commonflags.EndpointFlagDesc) - frostfsidDeleteGroupCmd.Flags().String(namespaceFlag, "", "Namespace to delete the group from") - frostfsidDeleteGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidDeleteGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initFrostfsIDListGroupsCmd() { - Cmd.AddCommand(frostfsidListGroupsCmd) - frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups") -} - -func initFrostfsIDAddSubjectToGroupCmd() { - Cmd.AddCommand(frostfsidAddSubjectToGroupCmd) - frostfsidAddSubjectToGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidAddSubjectToGroupCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidAddSubjectToGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidAddSubjectToGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initFrostfsIDRemoveSubjectFromGroupCmd() { - Cmd.AddCommand(frostfsidRemoveSubjectFromGroupCmd) - frostfsidRemoveSubjectFromGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidRemoveSubjectFromGroupCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidRemoveSubjectFromGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidRemoveSubjectFromGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initFrostfsIDListGroupSubjectsCmd() { - Cmd.AddCommand(frostfsidListGroupSubjectsCmd) - frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") - frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether to include subject info (requires additional requests)") -} - -func initFrostfsIDSetKVCmd() { - Cmd.AddCommand(frostfsidSetKVCmd) - frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag) - frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) -} - -func initFrostfsIDDeleteKVCmd() { - Cmd.AddCommand(frostfsidDeleteKVCmd) - frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag) -} - -func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { - ns := getFrostfsIDNamespace(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.CreateNamespaceCall(ns)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "create namespace error: %w", err) -} - -func frostfsidListNamespaces(cmd *cobra.Command, _ []string) { - inv, _, hash := initInvoker(cmd) - reader := frostfsidrpclient.NewReader(inv, hash) - sessionID, it, err := reader.ListNamespaces() - commonCmd.ExitOnErr(cmd, "can't get 
namespace: %w", err) - items, err := readIterator(inv, &it, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - - namespaces, err := frostfsidclient.ParseNamespaces(items) - commonCmd.ExitOnErr(cmd, "can't parse namespace: %w", err) - sort.Slice(namespaces, func(i, j int) bool { return namespaces[i].Name < namespaces[j].Name }) - - for _, namespace := range namespaces { - if namespace.Name == "" { - namespace.Name = rootNamespacePlaceholder - } - cmd.Printf("%s\n", namespace.Name) - } -} - -func frostfsidCreateSubject(cmd *cobra.Command, _ []string) { - ns := getFrostfsIDNamespace(cmd) - subjName := getFrostfsIDSubjectName(cmd) - subjKey := getFrostfsIDSubjectKey(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.CreateSubjectCall(ns, subjKey)) - if subjName != "" { - ffsid.addCall(ffsid.roCli.SetSubjectNameCall(subjKey.GetScriptHash(), subjName)) - } - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "create subject: %w", err) -} - -func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.DeleteSubjectCall(subjectAddress)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "delete subject error: %w", err) -} - -func frostfsidListSubjects(cmd *cobra.Command, _ []string) { - extended, _ := cmd.Flags().GetBool(extendedFlag) - ns := getFrostfsIDNamespace(cmd) - inv, _, hash := initInvoker(cmd) - reader := frostfsidrpclient.NewReader(inv, hash) - sessionID, it, err := reader.ListNamespaceSubjects(ns) - commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - - subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) - commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) - - sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) - - for _, addr := range subAddresses { - if !extended { - cmd.Println(address.Uint160ToString(addr)) - continue - } - - items, err := reader.GetSubject(addr) - commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - - subj, err := frostfsidclient.ParseSubject(items) - commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - - printSubjectInfo(cmd, addr, subj) - cmd.Println() - } -} - -func frostfsidCreateGroup(cmd *cobra.Command, _ []string) { - ns := getFrostfsIDNamespace(cmd) - groupName := getFrostfsIDGroupName(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.CreateGroupCall(ns, groupName)) - - groupID, err := ffsid.roCli.ParseGroupID(ffsid.sendWaitRes()) - commonCmd.ExitOnErr(cmd, "create group: %w", err) - - cmd.Printf("group '%s' created with id: %d\n", groupName, groupID) -} - -func frostfsidDeleteGroup(cmd *cobra.Command, _ []string) { - ns := getFrostfsIDNamespace(cmd) - groupID := getFrostfsIDGroupID(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.DeleteGroupCall(ns, groupID)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "delete group error: %w", err) -} - -func frostfsidListGroups(cmd *cobra.Command, _ []string) { - inv, _, hash := initInvoker(cmd) - ns := getFrostfsIDNamespace(cmd) - - reader := frostfsidrpclient.NewReader(inv, hash) - sessionID, it, err := reader.ListGroups(ns) - 
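// NOTE (editor): all list* handlers in this file share the session-iterator
// pattern used here: a reader method returns a session ID plus a
// result.Iterator, and readIterator (defined below) drains it page by page,
// e.g.:
//
//	sessionID, it, err := reader.ListNamespaces()
//	items, err := readIterator(inv, &it, sessionID)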
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - - items, err := readIterator(inv, &it, sessionID) - commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - groups, err := frostfsidclient.ParseGroups(items) - commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) - - sort.Slice(groups, func(i, j int) bool { return groups[i].Name < groups[j].Name }) - - for _, group := range groups { - cmd.Printf("%s (%d)\n", group.Name, group.ID) - } -} - -func frostfsidAddSubjectToGroup(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - groupID := getFrostfsIDGroupID(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.AddSubjectToGroupCall(subjectAddress, groupID)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "add subject to group error: %w", err) -} - -func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - groupID := getFrostfsIDGroupID(cmd) - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - ffsid.addCall(ffsid.roCli.RemoveSubjectFromGroupCall(subjectAddress, groupID)) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) -} - -func frostfsidSetKV(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - key, _ := cmd.Flags().GetString(keyFlag) - value, _ := cmd.Flags().GetString(valueFlag) - - if key == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) - } - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) - - ffsid.addCall(method, args) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "set KV: %w", err) -} - -func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - key, _ := cmd.Flags().GetString(keyFlag) - - if key == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) - } - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) - - ffsid.addCall(method, args) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "delete KV: %w", err) -} - -func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { - ns := getFrostfsIDNamespace(cmd) - groupID := getFrostfsIDGroupID(cmd) - extended, _ := cmd.Flags().GetBool(extendedFlag) - inv, cs, hash := initInvoker(cmd) - _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) - commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) - - reader := frostfsidrpclient.NewReader(inv, hash) - sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) - commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - - items, err := readIterator(inv, &it, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - - subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err) - commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) - - sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) - - for _, subjAddr := range subjects { - if !extended { - cmd.Println(address.Uint160ToString(subjAddr)) - continue - } - - items, err := reader.GetSubject(subjAddr) - commonCmd.ExitOnErr(cmd, "can't get subject: 
%w", err) - subj, err := frostfsidclient.ParseSubject(items) - commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - printSubjectInfo(cmd, subjAddr, subj) - cmd.Println() - } -} - -type frostfsidClient struct { - bw *io.BufBinWriter - contractHash util.Uint160 - roCli *frostfsidclient.Client // client can be used only for waiting tx, parsing and forming method params - wCtx *helper.InitializeContext -} - -func newFrostfsIDClient(cmd *cobra.Command) (*frostfsidClient, error) { - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return nil, fmt.Errorf("can't initialize context: %w", err) - } - - r := management.NewReader(wCtx.ReadOnlyInvoker) - cs, err := helper.GetContractByID(r, 1) - if err != nil { - return nil, fmt.Errorf("can't get NNS contract info: %w", err) - } - - ffsidHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) - if err != nil { - return nil, fmt.Errorf("can't get proxy contract hash: %w", err) - } - - return &frostfsidClient{ - bw: io.NewBufBinWriter(), - contractHash: ffsidHash, - roCli: frostfsidclient.NewSimple(wCtx.CommitteeAct, ffsidHash), - wCtx: wCtx, - }, nil -} - -func (f *frostfsidClient) addCall(method string, args []any) { - emit.AppCall(f.bw.BinWriter, f.contractHash, method, callflag.All, args...) -} - -func (f *frostfsidClient) sendWait() error { - if err := f.wCtx.SendConsensusTx(f.bw.Bytes()); err != nil { - return err - } - f.bw.Reset() - - return f.wCtx.AwaitTx() -} - -func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { - if err := f.wCtx.SendConsensusTx(f.bw.Bytes()); err != nil { - return nil, err - } - f.bw.Reset() - - f.wCtx.Command.Println("Waiting for transactions to persist...") - return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) -} - -func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { - var shouldStop bool - res := make([]stackitem.Item, 0) - for !shouldStop { - items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) - if err != nil { - return nil, err - } - - res = append(res, items...) 
- shouldStop = len(items) < iteratorBatchSize - } - - return res, nil -} - -func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) { - c, err := helper.NewRemoteClient(viper.GetViper()) - commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) - - inv := invoker.New(c, nil) - r := management.NewReader(inv) - - cs, err := r.GetContractByID(1) - commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err) - - nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) - commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) - - return inv, cs, nmHash -} - -func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { - cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) - pk := "" - if subj.PrimaryKey != nil { - pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) - } - cmd.Printf("Primary key: %s\n", pk) - cmd.Printf("Name: %s\n", subj.Name) - cmd.Printf("Namespace: %s\n", subj.Namespace) - if len(subj.AdditionalKeys) > 0 { - cmd.Printf("Additional keys:\n") - for _, key := range subj.AdditionalKeys { - k := "" - if key != nil { - k = hex.EncodeToString(key.Bytes()) - } - cmd.Printf("- %s\n", k) - } - } - if len(subj.KV) > 0 { - cmd.Printf("KV:\n") - for k, v := range subj.KV { - cmd.Printf("- %s: %s\n", k, v) - } - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go deleted file mode 100644 index 541a459c1..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go +++ /dev/null @@ -1,77 +0,0 @@ -package frostfsid - -import ( - "errors" - "fmt" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/cobra" -) - -func getFrostfsIDSubjectKey(cmd *cobra.Command) *keys.PublicKey { - subjKeyHex, _ := cmd.Flags().GetString(subjectKeyFlag) - subjKey, err := keys.NewPublicKeyFromString(subjKeyHex) - commonCmd.ExitOnErr(cmd, "invalid subject key: %w", err) - return subjKey -} - -func getFrostfsIDSubjectAddress(cmd *cobra.Command) util.Uint160 { - subjAddress, _ := cmd.Flags().GetString(subjectAddressFlag) - subjAddr, err := address.StringToUint160(subjAddress) - commonCmd.ExitOnErr(cmd, "invalid subject address: %w", err) - return subjAddr -} - -func getFrostfsIDSubjectName(cmd *cobra.Command) string { - subjectName, _ := cmd.Flags().GetString(subjectNameFlag) - - if subjectName == "" { - return "" - } - - if !ape.SubjectNameRegexp.MatchString(subjectName) { - commonCmd.ExitOnErr(cmd, "invalid subject name: %w", - fmt.Errorf("name must match regexp: %s", ape.SubjectNameRegexp.String())) - } - - return subjectName -} - -func getFrostfsIDGroupName(cmd *cobra.Command) string { - groupName, _ := cmd.Flags().GetString(groupNameFlag) - - if !ape.GroupNameRegexp.MatchString(groupName) { - commonCmd.ExitOnErr(cmd, "invalid group name: %w", - fmt.Errorf("name must match regexp: %s", ape.GroupNameRegexp.String())) - } - - return groupName -} - -func getFrostfsIDGroupID(cmd *cobra.Command) int64 { - groupID, _ := cmd.Flags().GetInt64(groupIDFlag) - if groupID <= 0 { - commonCmd.ExitOnErr(cmd, "invalid group id: %w", - errors.New("group id must be positive integer")) - } - - return groupID -} - -func 
getFrostfsIDNamespace(cmd *cobra.Command) string { - ns, _ := cmd.Flags().GetString(namespaceFlag) - if ns == rootNamespacePlaceholder { - ns = "" - } - - if !ape.NamespaceNameRegexp.MatchString(ns) { - commonCmd.ExitOnErr(cmd, "invalid namespace: %w", - fmt.Errorf("name must match regexp: %s", ape.NamespaceNameRegexp.String())) - } - - return ns -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go deleted file mode 100644 index 1d0bc8441..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package frostfsid - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" - "github.com/stretchr/testify/require" -) - -func TestNamespaceRegexp(t *testing.T) { - for _, tc := range []struct { - name string - namespace string - matched bool - }{ - { - name: "root empty ns", - namespace: "", - matched: true, - }, - { - name: "simple valid ns", - namespace: "my-namespace-123", - matched: true, - }, - { - name: "root placeholder", - namespace: "", - matched: false, - }, - { - name: "too long", - namespace: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz", - matched: false, - }, - { - name: "start with hyphen", - namespace: "-ns", - matched: false, - }, - { - name: "end with hyphen", - namespace: "ns-", - matched: false, - }, - { - name: "with spaces", - namespace: "ns ns", - matched: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - require.Equal(t, tc.matched, ape.NamespaceNameRegexp.MatchString(tc.namespace)) - }) - } -} - -func TestSubjectNameRegexp(t *testing.T) { - for _, tc := range []struct { - name string - subject string - matched bool - }{ - { - name: "empty", - subject: "", - matched: false, - }, - { - name: "invalid", - subject: "invalid{name}", - matched: false, - }, - { - name: "too long", - subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz", - matched: false, - }, - { - name: "valid", - subject: "valid_name.012345@6789", - matched: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - require.Equal(t, tc.matched, ape.SubjectNameRegexp.MatchString(tc.subject)) - }) - } -} - -func TestSubjectGroupRegexp(t *testing.T) { - for _, tc := range []struct { - name string - subject string - matched bool - }{ - { - name: "empty", - subject: "", - matched: false, - }, - { - name: "invalid", - subject: "invalid{name}", - matched: false, - }, - { - name: "too long", - subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz", - matched: false, - }, - { - name: "long", - subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz", - matched: true, - }, - { - name: "valid", - subject: "valid_name.012345@6789", - matched: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - require.Equal(t, tc.matched, ape.GroupNameRegexp.MatchString(tc.subject)) - }) - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go deleted file mode 100644 index 8aad5c5c1..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ /dev/null @@ -1,19 +0,0 @@ -package frostfsid - -func init() { - initFrostfsIDCreateNamespaceCmd() - initFrostfsIDListNamespacesCmd() - initFrostfsIDCreateSubjectCmd() - initFrostfsIDDeleteSubjectCmd() - initFrostfsIDListSubjectsCmd() - 
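// NOTE (editor): each init* call in this function registers exactly one
// subcommand on Cmd together with its flags; wiring a hypothetical new
// subcommand would mean adding a frostfsidXxxCmd definition, an
// initFrostfsIDXxxCmd constructor, and one more call in this list.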
initFrostfsIDCreateGroupCmd() - initFrostfsIDDeleteGroupCmd() - initFrostfsIDListGroupsCmd() - initFrostfsIDAddSubjectToGroupCmd() - initFrostfsIDRemoveSubjectFromGroupCmd() - initFrostfsIDListGroupSubjectsCmd() - initFrostfsIDSetKVCmd() - initFrostfsIDDeleteKVCmd() - initFrostfsIDAddSubjectKeyCmd() - initFrostfsIDRemoveSubjectKeyCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go deleted file mode 100644 index 78f8617f1..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ /dev/null @@ -1,193 +0,0 @@ -package generate - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" -) - -func AlphabetCreds(cmd *cobra.Command, _ []string) error { - // alphabet size is not part of the config - size, err := cmd.Flags().GetUint(commonflags.AlphabetSizeFlag) - if err != nil { - return err - } - if size == 0 { - return errors.New("size must be > 0") - } - if size > constants.MaxAlphabetNodes { - return helper.ErrTooManyAlphabetNodes - } - - v := viper.GetViper() - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) - pwds, err := initializeWallets(v, walletDir, int(size)) - if err != nil { - return err - } - - _, err = helper.InitializeContractWallet(v, walletDir) - if err != nil { - return err - } - - cmd.Println("size:", size) - cmd.Println("alphabet-wallets:", walletDir) - for i := range pwds { - cmd.Printf("wallet[%d]: %s\n", i, pwds[i]) - } - - return nil -} - -func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, error) { - wallets := make([]*wallet.Wallet, size) - pubs := make(keys.PublicKeys, size) - passwords := make([]string, size) - - var errG errgroup.Group - - for i := range wallets { - password, err := config.GetPassword(v, innerring.GlagoliticLetter(i).String()) - if err != nil { - return nil, fmt.Errorf("can't fetch password: %w", err) - } - - errG.Go(func() error { - p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json") - f, err := os.OpenFile(p, os.O_CREATE, 0o644) - if err != nil { - return fmt.Errorf("can't create wallet file: %w", err) - } - if err := f.Close(); err != nil { - return fmt.Errorf("can't close wallet file: %w", err) - } - w, err := wallet.NewWallet(p) - if err != nil { - return fmt.Errorf("can't create wallet: %w", err) - } - if err := w.CreateAccount(constants.SingleAccountName, password); err != nil { - return fmt.Errorf("can't create account: %w", err) - } - - passwords[i] = password - wallets[i] = w - pubs[i] = w.Accounts[0].PrivateKey().PublicKey() - return 
nil - }) - } - - if err := errG.Wait(); err != nil { - return nil, err - } - - // Create committee account with N/2+1 multi-signature. - majCount := smartcontract.GetMajorityHonestNodeCount(size) - // Create consensus account with 2*N/3+1 multi-signature. - bftCount := smartcontract.GetDefaultHonestNodeCount(size) - for i := range wallets { - ps := pubs.Copy() - errG.Go(func() error { - if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil { - return fmt.Errorf("can't create committee account: %w", err) - } - if err := addMultisigAccount(wallets[i], bftCount, constants.ConsensusAccountName, passwords[i], ps); err != nil { - return fmt.Errorf("can't create consensus account: %w", err) - } - if err := wallets[i].SavePretty(); err != nil { - return fmt.Errorf("can't save wallet: %w", err) - } - return nil - }) - } - if err := errG.Wait(); err != nil { - return nil, err - } - return passwords, nil -} - -func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs keys.PublicKeys) error { - acc := wallet.NewAccountFromPrivateKey(w.Accounts[0].PrivateKey()) - acc.Label = name - - if err := acc.ConvertMultisig(m, pubs); err != nil { - return err - } - if err := acc.Encrypt(password, keys.NEP2ScryptParams()); err != nil { - return err - } - w.AddAccount(acc) - return nil -} - -func generateStorageCreds(cmd *cobra.Command, _ []string) error { - walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - w, err := wallet.NewWallet(walletPath) - if err != nil { - return fmt.Errorf("create wallet: %w", err) - } - - label, _ := cmd.Flags().GetString(storageWalletLabelFlag) - password, err := config.GetStoragePassword(viper.GetViper(), label) - if err != nil { - return fmt.Errorf("can't fetch password: %w", err) - } - - if label == "" { - label = constants.SingleAccountName - } - - if err := w.CreateAccount(label, password); err != nil { - return fmt.Errorf("can't create account: %w", err) - } - return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) -} - -func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) { - gasStr := viper.GetString(gasFlag) - - gasAmount, err := helper.ParseGASAmount(gasStr) - if err != nil { - return err - } - - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return err - } - - bw := io.NewBufBinWriter() - for _, gasReceiver := range gasReceivers { - emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, - wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - } - if bw.Err != nil { - return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err) - } - - if err := wCtx.SendCommitteeTx(bw.Bytes(), false); err != nil { - return err - } - - return wCtx.AwaitTx() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go deleted file mode 100644 index 15af5637b..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package generate - -import ( - "bytes" - "io" - "math/rand" - "os" - "path/filepath" - "strconv" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - 
"github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "golang.org/x/term" -) - -func TestGenerateAlphabet(t *testing.T) { - walletDir := t.TempDir() - buf := setupTestTerminal(t) - - cmd := GenerateAlphabetCmd - v := viper.GetViper() - - t.Run("zero size", func(t *testing.T) { - buf.Reset() - v.Set(commonflags.AlphabetWalletsFlag, walletDir) - require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "0")) - buf.WriteString("pass\r") - require.Error(t, AlphabetCreds(cmd, nil)) - }) - t.Run("no password provided", func(t *testing.T) { - buf.Reset() - v.Set(commonflags.AlphabetWalletsFlag, walletDir) - require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1")) - require.Error(t, AlphabetCreds(cmd, nil)) - }) - t.Run("missing directory", func(t *testing.T) { - buf.Reset() - dir := filepath.Join(os.TempDir(), "notexist."+strconv.FormatUint(rand.Uint64(), 10)) - v.Set(commonflags.AlphabetWalletsFlag, dir) - require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1")) - buf.WriteString("pass\r") - require.Error(t, AlphabetCreds(cmd, nil)) - }) - t.Run("no password for contract group wallet", func(t *testing.T) { - buf.Reset() - v.Set(commonflags.AlphabetWalletsFlag, walletDir) - require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1")) - buf.WriteString("pass\r") - require.Error(t, AlphabetCreds(cmd, nil)) - }) - - const size = 4 - - buf.Reset() - v.Set(commonflags.AlphabetWalletsFlag, walletDir) - require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10))) - for i := range uint64(size) { - buf.WriteString(strconv.FormatUint(i, 10) + "\r") - } - - buf.WriteString(constants.TestContractPassword + "\r") - require.NoError(t, AlphabetCreds(GenerateAlphabetCmd, nil)) - - var wg sync.WaitGroup - for i := uint64(0); i < size; i++ { - i := i - wg.Add(1) - go func() { - defer wg.Done() - p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json") - w, err := wallet.NewWalletFromFile(p) - require.NoError(t, err, "wallet doesn't exist") - require.Equal(t, 3, len(w.Accounts), "not all accounts were created") - - for _, a := range w.Accounts { - err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams()) - require.NoError(t, err, "can't decrypt account") - switch a.Label { - case constants.ConsensusAccountName: - require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters)) - case constants.CommitteeAccountName: - require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters)) - default: - require.Equal(t, constants.SingleAccountName, a.Label) - } - } - }() - } - wg.Wait() - - t.Run("check contract group wallet", func(t *testing.T) { - p := filepath.Join(walletDir, constants.ContractWalletFilename) - w, err := wallet.NewWalletFromFile(p) - require.NoError(t, err, "contract wallet doesn't exist") - require.Equal(t, 1, len(w.Accounts), "contract wallet must have 1 accout") - require.NoError(t, w.Accounts[0].Decrypt(constants.TestContractPassword, keys.NEP2ScryptParams())) - }) -} - -func setupTestTerminal(t *testing.T) *bytes.Buffer { - in := bytes.NewBuffer(nil) - input.Terminal = term.NewTerminal(input.ReadWriter{ - Reader: in, - Writer: io.Discard, - }, "") - - t.Cleanup(func() { input.Terminal = nil }) - - return in -} diff --git 
a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go deleted file mode 100644 index 73c986713..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go +++ /dev/null @@ -1,101 +0,0 @@ -package generate - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - storageWalletLabelFlag = "label" - storageGasCLIFlag = "initial-gas" - storageGasConfigFlag = "storage.initial_gas" - walletAddressFlag = "wallet-address" -) - -var ( - GenerateStorageCmd = &cobra.Command{ - Use: "generate-storage-wallet", - Short: "Generate storage node wallet for the morph network", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(storageGasConfigFlag, cmd.Flags().Lookup(storageGasCLIFlag)) - }, - RunE: generateStorageCreds, - } - RefillGasCmd = &cobra.Command{ - Use: "refill-gas", - Short: "Refill GAS of storage node's wallet in the morph network", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) - walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag) - - var gasReceivers []util.Uint160 - for _, walletAddress := range walletAddresses { - addr, err := address.StringToUint160(walletAddress) - if err != nil { - return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) - } - - gasReceivers = append(gasReceivers, addr) - } - for _, storageWalletPath := range storageWalletPaths { - w, err := wallet.NewWalletFromFile(storageWalletPath) - if err != nil { - return fmt.Errorf("can't create wallet: %w", err) - } - - gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash()) - } - return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...) 
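// NOTE (editor): both flag sources above feed the same gasReceivers slice:
// wallet-address values are parsed as NEO addresses, while storage wallet
// paths are opened and the first account's script hash is taken; refillGas
// (in generate.go) then emits one GAS transfer per receiver inside a single
// committee transaction.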
- }, - } - GenerateAlphabetCmd = &cobra.Command{ - Use: "generate-alphabet", - Short: "Generate alphabet wallets for consensus nodes of the morph network", - PreRun: func(cmd *cobra.Command, _ []string) { - // PreRun fixes https://github.com/spf13/viper/issues/233 - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - }, - RunE: AlphabetCreds, - } -) - -func initRefillGasCmd() { - RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") - RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") - RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") - RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) -} - -func initGenerateStorageCmd() { - GenerateStorageCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - GenerateStorageCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - GenerateStorageCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to new storage node wallet") - GenerateStorageCmd.Flags().String(storageGasCLIFlag, "", "Initial amount of GAS to transfer") - GenerateStorageCmd.Flags().StringP(storageWalletLabelFlag, "l", "", "Wallet label") -} - -func initGenerateAlphabetCmd() { - GenerateAlphabetCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - GenerateAlphabetCmd.Flags().Uint(commonflags.AlphabetSizeFlag, 7, "Amount of alphabet wallets to generate") -} - -func init() { - initRefillGasCmd() - initGenerateStorageCmd() - initGenerateAlphabetCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go deleted file mode 100644 index 6499ace5f..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ /dev/null @@ -1,214 +0,0 @@ -package helper - -import ( - "fmt" - - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/viper" -) - -// LocalActor is a kludge, do not use it outside of the morph commands. 
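// Editor's gloss: it exists because morph commands need transactions signed
// by several committee accounts at once; the embedded actor.Actor produces
// the first signature, and resign (below) completes the multisig witness
// with the remaining accounts.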
-type LocalActor struct { - neoActor *actor.Actor - accounts []*wallet.Account - Invoker *invoker.Invoker - rpcInvoker invoker.RPCInvoke -} - -type AlphabetWallets struct { - Label string - Path string -} - -func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { - w, err := GetAlphabetWallets(v, a.Path) - if err != nil { - return nil, err - } - - var accounts []*wallet.Account - for _, wall := range w { - acc, err := GetWalletAccount(wall, a.Label) - if err != nil { - return nil, err - } - accounts = append(accounts, acc) - } - return accounts, nil -} - -type RegularWallets struct{ Path string } - -func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) { - w, err := getRegularWallet(r.Path) - if err != nil { - return nil, err - } - - return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil -} - -// NewLocalActor creates a LocalActor with accounts from the provided wallets. -// If no wallets are provided, the actor is created with a dummy account and is suitable only for read operations. -// -// If wallets are provided, the contract client will use accounts with accName name from these wallets. -// To determine which account name should be used in a contract client, refer to how the contract -// verifies the transaction signature. -func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { - var act *actor.Actor - var accounts []*wallet.Account - var signers []actor.SignerAccount - - if alphabet != nil { - account, err := alphabet.GetAccount(viper.GetViper()) - if err != nil { - return nil, err - } - - accounts = append(accounts, account...) - signers = append(signers, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: account[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account[0], - }) - } - - for _, w := range regularWallets { - if w == nil { - continue - } - account, err := w.GetAccount() - if err != nil { - return nil, err - } - - accounts = append(accounts, account...) - signers = append(signers, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: account[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account[0], - }) - } - - act, err := actor.New(c, signers) - if err != nil { - return nil, err - } - return &LocalActor{ - neoActor: act, - accounts: accounts, - Invoker: &act.Invoker, - rpcInvoker: c, - }, nil -} - -func (a *LocalActor) SendCall(contract util.Uint160, method string, params ...any) (util.Uint256, uint32, error) { - tx, err := a.neoActor.MakeCall(contract, method, params...) - if err != nil { - return util.Uint256{}, 0, err - } - err = a.resign(tx) - if err != nil { - return util.Uint256{}, 0, err - } - return a.neoActor.Send(tx) -} - -func (a *LocalActor) SendRun(script []byte) (util.Uint256, uint32, error) { - tx, err := a.neoActor.MakeRun(script) - if err != nil { - return util.Uint256{}, 0, err - } - err = a.resign(tx) - if err != nil { - return util.Uint256{}, 0, err - } - return a.neoActor.Send(tx) -} - -// resign is used to sign a tx with the committee accounts. -// Inside the methods `MakeCall` and `SendRun` the transaction is signed by a single committee account, -// because NeoGo's actor uses the committee wallet. -// That is not enough: the tx must also be signed by the remaining committee accounts. -func (a *LocalActor) resign(tx *transaction.Transaction) error { - if len(a.accounts[0].Contract.Parameters) > 1 { - // Use parameter context to avoid dealing with signature order. 
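// Editor's gloss on the flow below: every committee private key signs the
// same transaction hash, each signature is registered in the context under
// the multisig contract, and once one signature per contract parameter has
// been collected the assembled witness replaces tx.Scripts[0].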
- network := a.neoActor.GetNetwork() - pc := context.NewParameterContext("", network, tx) - h := a.accounts[0].Contract.ScriptHash() - for _, acc := range a.accounts { - priv := acc.PrivateKey() - sign := priv.SignHashable(uint32(network), tx) - if err := pc.AddSignature(h, acc.Contract, priv.PublicKey(), sign); err != nil { - return fmt.Errorf("can't add signature: %w", err) - } - if len(pc.Items[h].Signatures) == len(acc.Contract.Parameters) { - break - } - } - - w, err := pc.GetWitness(h) - if err != nil { - return fmt.Errorf("incomplete signature: %w", err) - } - tx.Scripts[0] = *w - } - return nil -} - -func (a *LocalActor) Wait(h util.Uint256, vub uint32, err error) (*state.AppExecResult, error) { - return a.neoActor.Wait(h, vub, err) -} - -func (a *LocalActor) Sender() util.Uint160 { - return a.neoActor.Sender() -} - -func (a *LocalActor) Call(contract util.Uint160, operation string, params ...any) (*result.Invoke, error) { - return a.neoActor.Call(contract, operation, params...) -} - -func (a *LocalActor) CallAndExpandIterator(_ util.Uint160, _ string, _ int, _ ...any) (*result.Invoke, error) { - panic("unimplemented") -} - -func (a *LocalActor) TerminateSession(_ uuid.UUID) error { - panic("unimplemented") -} - -func (a *LocalActor) TraverseIterator(sessionID uuid.UUID, iterator *result.Iterator, num int) ([]stackitem.Item, error) { - return a.neoActor.TraverseIterator(sessionID, iterator, num) -} - -func (a *LocalActor) MakeRun(_ []byte) (*transaction.Transaction, error) { - panic("unimplemented") -} - -func (a *LocalActor) MakeUnsignedCall(_ util.Uint160, _ string, _ []transaction.Attribute, _ ...any) (*transaction.Transaction, error) { - panic("unimplemented") -} - -func (a *LocalActor) MakeUnsignedRun(_ []byte, _ []transaction.Attribute) (*transaction.Transaction, error) { - panic("unimplemented") -} - -func (a *LocalActor) MakeCall(_ util.Uint160, _ string, _ ...any) (*transaction.Transaction, error) { - panic("unimplemented") -} - -func (a *LocalActor) GetRPCInvoker() invoker.RPCInvoke { - return a.rpcInvoker -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go deleted file mode 100644 index 64d1c6393..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go +++ /dev/null @@ -1,171 +0,0 @@ -package helper - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/spf13/viper" -) - -func getFrostfsIDAdminFromContract(roInvoker *invoker.Invoker) (util.Uint160, bool, error) { - r := management.NewReader(roInvoker) - cs, err := GetContractByID(r, 1) - if err != nil { - return util.Uint160{}, false, fmt.Errorf("get nns contract: %w", err) - } - fidHash, err := NNSResolveHash(roInvoker, cs.Hash, DomainOf(constants.FrostfsIDContract)) - if err != nil { - return util.Uint160{}, false, fmt.Errorf("resolve frostfsid contract hash: %w", err) - } - item, err := unwrap.Item(roInvoker.Call(fidHash, "getAdmin")) - if err != nil { - return util.Uint160{}, false, fmt.Errorf("getAdmin: %w", err) - } - if _, ok := item.(stackitem.Null); ok { - return util.Uint160{}, false, nil - } - - bs, err := item.TryBytes() - if err != nil 
{ - return util.Uint160{}, true, fmt.Errorf("getAdmin: decode result: %w", err) - } - h, err := util.Uint160DecodeBytesBE(bs) - if err != nil { - return util.Uint160{}, true, fmt.Errorf("getAdmin: decode result: %w", err) - } - return h, true, nil -} - -func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any, method string) ([]any, error) { - items := make([]any, 0, 6) - - switch ctrName { - case constants.FrostfsContract: - items = append(items, - c.Contracts[constants.ProcessingContract].Hash, - keysParam, - smartcontract.Parameter{}) - case constants.ProcessingContract: - items = append(items, c.Contracts[constants.FrostfsContract].Hash) - return items[1:], nil // no notary info - case constants.BalanceContract: - items = append(items, - c.Contracts[constants.NetmapContract].Hash, - c.Contracts[constants.ContainerContract].Hash) - case constants.ContainerContract: - // In case if NNS is updated multiple times, we can't calculate - // it's actual hash based on local data, thus query chain. - r := management.NewReader(c.ReadOnlyInvoker) - nnsCs, err := GetContractByID(r, 1) - if err != nil { - return nil, fmt.Errorf("get nns contract: %w", err) - } - items = append(items, - c.Contracts[constants.NetmapContract].Hash, - c.Contracts[constants.BalanceContract].Hash, - c.Contracts[constants.FrostfsIDContract].Hash, - nnsCs.Hash, - "container") - case constants.FrostfsIDContract: - var ( - h util.Uint160 - found bool - err error - ) - if method == constants.UpdateMethodName { - h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker) - } - if method != constants.UpdateMethodName || err == nil && !found { - h, found, err = getFrostfsIDAdmin(viper.GetViper()) - } - if err != nil { - return nil, err - } - - if found { - items = append(items, h) - } else { - items = append(items, c.Contracts[constants.ProxyContract].Hash) - } - case constants.NetmapContract: - md := GetDefaultNetmapContractConfigMap() - if method == constants.UpdateMethodName { - if err := MergeNetmapConfig(c.ReadOnlyInvoker, md); err != nil { - return nil, err - } - } - - var configParam []any - for k, v := range md { - configParam = append(configParam, k, v) - } - - items = append(items, - c.Contracts[constants.BalanceContract].Hash, - c.Contracts[constants.ContainerContract].Hash, - keysParam, - configParam) - case constants.ProxyContract: - items = nil - case constants.PolicyContract: - items = append(items, c.Contracts[constants.ProxyContract].Hash) - default: - panic("invalid contract name: " + ctrName) - } - return items, nil -} - -func GetContractDeployParameters(cs *ContractState, deployData []any) []any { - return []any{cs.RawNEF, cs.RawManifest, deployData} -} - -func DeployNNS(c *InitializeContext, method string) error { - cs := c.GetContract(constants.NNSContract) - h := cs.Hash - - nnsCs, err := c.NNSContractState() - if err != nil { - return err - } - if nnsCs != nil { - if nnsCs.NEF.Checksum == cs.NEF.Checksum { - if method == constants.DeployMethodName { - c.Command.Println("NNS contract is already deployed.") - } else { - c.Command.Println("NNS contract is already updated.") - } - return nil - } - h = nnsCs.Hash - } - - err = AddManifestGroup(c.ContractWallet, h, cs) - if err != nil { - return fmt.Errorf("can't sign manifest group: %v", err) - } - - params := GetContractDeployParameters(cs, nil) - - invokeHash := management.Hash - if method == constants.UpdateMethodName { - invokeHash = nnsCs.Hash - } - - tx, err := c.CommitteeAct.MakeCall(invokeHash, method, params...) 
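// Editor's gloss: invokeHash was selected above; it is the native management
// contract for a first deployment and the existing NNS contract itself for an
// update, so the same params serve both the "deploy" and "update" paths.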
- if err != nil { - return fmt.Errorf("failed to create deploy tx for %s: %w", constants.NNSContract, err) - } - - if err := c.MultiSignAndSend(tx, constants.CommitteeAccountName); err != nil { - return fmt.Errorf("can't send deploy transaction: %w", err) - } - - c.Command.Println("NNS hash:", invokeHash.StringLE()) - return c.AwaitTx() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/download.go b/cmd/frostfs-adm/internal/modules/morph/helper/download.go deleted file mode 100644 index 71528a5db..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/download.go +++ /dev/null @@ -1,83 +0,0 @@ -package helper - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "time" - - "code.gitea.io/sdk/gitea" - "github.com/spf13/cobra" -) - -var errNoReleasesFound = errors.New("attempt to fetch contracts archive from the official repository failed: no releases found") - -func downloadContracts(cmd *cobra.Command, url string) (io.ReadCloser, error) { - cmd.Printf("Downloading contracts archive from '%s'\n", url) - - // HTTP client with connect timeout - client := http.Client{ - Transport: &http.Transport{ - DialContext: (&net.Dialer{ - Timeout: 10 * time.Second, - }).DialContext, - }, - } - - ctx, cancel := context.WithTimeout(cmd.Context(), 60*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return nil, fmt.Errorf("can't create request: %w", err) - } - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("can't fetch contracts archive: %w", err) - } - return resp.Body, nil -} - -func downloadContractsFromRepository(cmd *cobra.Command) (io.ReadCloser, error) { - client, err := gitea.NewClient("https://git.frostfs.info") - if err != nil { - return nil, fmt.Errorf("can't initialize repository client: %w", err) - } - - releases, _, err := client.ListReleases("TrueCloudLab", "frostfs-contract", gitea.ListReleasesOptions{}) - if err != nil { - return nil, fmt.Errorf("can't fetch release information: %w", err) - } - - var latestRelease *gitea.Release - for _, r := range releases { - if !r.IsDraft && !r.IsPrerelease { - latestRelease = r - break - } - } - - if latestRelease == nil { - return nil, errNoReleasesFound - } - - cmd.Printf("Found release %s (%s)\n", latestRelease.TagName, latestRelease.Title) - - var url string - for _, a := range latestRelease.Attachments { - if strings.HasPrefix(a.Name, "frostfs-contract") { - url = a.DownloadURL - break - } - } - if url == "" { - return nil, errors.New("can't find contracts archive in the latest release") - } - - return downloadContracts(cmd, url) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go deleted file mode 100644 index fce2dfb74..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go +++ /dev/null @@ -1,35 +0,0 @@ -package helper - -import ( - "fmt" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/viper" -) - -const frostfsIDAdminConfigKey = "frostfsid.admin" - -func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { - admin := v.GetString(frostfsIDAdminConfigKey) - if admin == "" { - return util.Uint160{}, false, nil - } - - h, err := address.StringToUint160(admin) - if err == nil { - return h, true, nil - } - - h, err = util.Uint160DecodeStringLE(admin) - if err == nil { - return 
h, true, nil - } - - pk, err := keys.NewPublicKeyFromString(admin) - if err == nil { - return pk.GetScriptHash(), true, nil - } - return util.Uint160{}, true, fmt.Errorf("frostfsid: admin is invalid: '%s'", admin) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go deleted file mode 100644 index 38991e962..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package helper - -import ( - "encoding/hex" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" -) - -func TestFrostfsIDConfig(t *testing.T) { - pks := make([]*keys.PrivateKey, 4) - for i := range pks { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - pks[i] = pk - } - - fmts := []string{ - pks[0].GetScriptHash().StringLE(), - address.Uint160ToString(pks[1].GetScriptHash()), - hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), - hex.EncodeToString(pks[3].PublicKey().Bytes()), - } - - for i := range fmts { - v := viper.New() - v.Set("frostfsid.admin", fmts[i]) - - actual, found, err := getFrostfsIDAdmin(v) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, pks[i].GetScriptHash(), actual) - } - - t.Run("bad key", func(t *testing.T) { - v := viper.New() - v.Set("frostfsid.admin", "abc") - - _, found, err := getFrostfsIDAdmin(v) - require.Error(t, err) - require.True(t, found) - }) - t.Run("missing key", func(t *testing.T) { - v := viper.New() - - _, found, err := getFrostfsIDAdmin(v) - require.NoError(t, err) - require.False(t, found) - }) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/group.go b/cmd/frostfs-adm/internal/modules/morph/helper/group.go deleted file mode 100644 index 10a164651..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/group.go +++ /dev/null @@ -1,39 +0,0 @@ -package helper - -import ( - "encoding/json" - - "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" -) - -func AddManifestGroup(cw *wallet.Wallet, h util.Uint160, cs *ContractState) error { - priv := cw.Accounts[0].PrivateKey() - pub := priv.PublicKey() - - sig := priv.Sign(h.BytesBE()) - found := false - - for i := range cs.Manifest.Groups { - if cs.Manifest.Groups[i].PublicKey.Equal(pub) { - cs.Manifest.Groups[i].Signature = sig - found = true - break - } - } - if !found { - cs.Manifest.Groups = append(cs.Manifest.Groups, manifest.Group{ - PublicKey: pub, - Signature: sig, - }) - } - - data, err := json.Marshal(cs.Manifest) - if err != nil { - return err - } - - cs.RawManifest = data - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go deleted file mode 100644 index 50b5c1ec7..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ /dev/null @@ -1,212 +0,0 @@ -package helper - -import ( - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - 
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ErrTooManyAlphabetNodes = fmt.Errorf("too many alphabet nodes (maximum allowed is %d)", constants.MaxAlphabetNodes) - -func AwaitTx(cmd *cobra.Command, c Client, txs []HashVUBPair) error { - cmd.Println("Waiting for transactions to persist...") - - at := trigger.Application - - var retErr error - -loop: - for i := range txs { - var it int - var pollInterval time.Duration - var pollIntervalChanged bool - for { - // We must fetch current height before application log, to avoid race condition. - currBlock, err := c.GetBlockCount() - if err != nil { - return fmt.Errorf("can't fetch current block height: %w", err) - } - res, err := c.GetApplicationLog(txs[i].Hash, &at) - if err == nil { - if retErr == nil && len(res.Executions) > 0 && res.Executions[0].VMState != vmstate.Halt { - retErr = fmt.Errorf("tx %d persisted in %s state: %s", - i, res.Executions[0].VMState, res.Executions[0].FaultException) - } - continue loop - } - if txs[i].Vub < currBlock { - return fmt.Errorf("tx was not persisted: Vub=%d, height=%d", txs[i].Vub, currBlock) - } - - pollInterval, pollIntervalChanged = NextPollInterval(it, pollInterval) - if pollIntervalChanged && viper.GetBool(commonflags.Verbose) { - cmd.Printf("Pool interval to check transaction persistence changed: %s\n", pollInterval.String()) - } - - timer := time.NewTimer(pollInterval) - select { - case <-cmd.Context().Done(): - return cmd.Context().Err() - case <-timer.C: - } - - it++ - } - } - - return retErr -} - -func NextPollInterval(it int, previous time.Duration) (time.Duration, bool) { - const minPollInterval = 1 * time.Second - const maxPollInterval = 16 * time.Second - const changeAfter = 5 - if it == 0 { - return minPollInterval, true - } - if it%changeAfter != 0 { - return previous, false - } - nextInterval := previous * 2 - if nextInterval > maxPollInterval { - return maxPollInterval, previous != maxPollInterval - } - return nextInterval, true -} - -func GetWalletAccount(w *wallet.Wallet, typ string) (*wallet.Account, error) { - for i := range w.Accounts { - if w.Accounts[i].Label == typ { - return w.Accounts[i], nil - } - } - return nil, fmt.Errorf("account for '%s' not found", typ) -} - -func GetComitteAcc(cmd *cobra.Command, v *viper.Viper) *wallet.Account { - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) - wallets, err := GetAlphabetWallets(v, walletDir) - commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) - - committeeAcc, err := GetWalletAccount(wallets[0], constants.CommitteeAccountName) - commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err) - return committeeAcc -} - -func NNSResolve(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (stackitem.Item, error) { - return unwrap.Item(inv.Call(nnsHash, "resolve", domain, int64(nns.TXT))) -} - -// ParseNNSResolveResult parses the result of resolving NNS record. 
-// ParseNNSResolveResult parses the result of resolving an NNS record.
-// It works with multiple formats (corresponding to multiple NNS versions).
-// If an array of hashes is provided, it returns only the first one.
-func ParseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
-	arr, ok := res.Value().([]stackitem.Item)
-	if !ok {
-		arr = []stackitem.Item{res}
-	}
-	if _, ok := res.Value().(stackitem.Null); ok || len(arr) == 0 {
-		return util.Uint160{}, errors.New("NNS record is missing")
-	}
-	for i := range arr {
-		bs, err := arr[i].TryBytes()
-		if err != nil {
-			continue
-		}
-
-		// We support several formats for hash encoding; this logic should be kept in sync
-		// with NNSResolve from pkg/morph/client/nns.go.
-		h, err := util.Uint160DecodeStringLE(string(bs))
-		if err == nil {
-			return h, nil
-		}
-
-		h, err = address.StringToUint160(string(bs))
-		if err == nil {
-			return h, nil
-		}
-	}
-	return util.Uint160{}, errors.New("no valid hashes are found")
-}
-
-// NNSResolveHash resolves a contract script hash stored in the TXT record of the given NNS domain.
-func NNSResolveHash(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (util.Uint160, error) {
-	item, err := NNSResolve(inv, nnsHash, domain)
-	if err != nil {
-		return util.Uint160{}, err
-	}
-	return ParseNNSResolveResult(item)
-}
-
-func DomainOf(contract string) string {
-	return contract + ".frostfs"
-}
-
-func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
-	res, err := NNSResolve(inv, nnsHash, domain)
-	if err != nil {
-		return nil, err
-	}
-	if _, ok := res.Value().(stackitem.Null); ok {
-		return nil, errors.New("NNS record is missing")
-	}
-	arr, ok := res.Value().([]stackitem.Item)
-	if !ok {
-		return nil, errors.New("API of the NNS contract method `resolve` has changed")
-	}
-	for i := range arr {
-		var bs []byte
-		bs, err = arr[i].TryBytes()
-		if err != nil {
-			continue
-		}
-
-		return keys.NewPublicKeyFromString(string(bs))
-	}
-	return nil, errors.New("no valid keys are found")
-}
-
-func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
-	inv := invoker.New(c, nil)
-	reader := nns2.NewReader(inv, nnsHash)
-	return reader.IsAvailable(name)
-}
-
-func CheckNotaryEnabled(c Client) error {
-	ns, err := c.GetNativeContracts()
-	if err != nil {
-		return fmt.Errorf("can't get native contract hashes: %w", err)
-	}
-
-	for i := range ns {
-		if ns[i].Manifest.Name == nativenames.Notary {
-			return nil
-		}
-	}
-	return errors.New("notary contract must be enabled")
-}
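Before the next file, a short sketch of the two hash encodings `ParseNNSResolveResult` accepts in an NNS TXT record: a little-endian hex string and a Neo address. Both decode to the same `util.Uint160`; the neo-go helpers are the ones already used above, and the hash value itself is an arbitrary example.

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func main() {
	var h util.Uint160
	h[19] = 0x42 // arbitrary script hash for demonstration

	le := h.StringLE()                 // the 20-byte hash as a little-endian hex string
	addr := address.Uint160ToString(h) // the same hash as a base58 Neo address

	fromLE, _ := util.Uint160DecodeStringLE(le)
	fromAddr, _ := address.StringToUint160(addr)
	fmt.Println(fromLE == h, fromAddr == h) // true true: both encodings resolve to h
}
```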
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/nef" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - errNegativeDuration = errors.New("epoch duration must be positive") - errNegativeSize = errors.New("max object size must be positive") -) - -type ContractState struct { - NEF *nef.File - RawNEF []byte - Manifest *manifest.Manifest - RawManifest []byte - Hash util.Uint160 -} - -type Cache struct { - NNSCs *state.Contract - GroupKey *keys.PublicKey -} - -type InitializeContext struct { - ClientContext - Cache - // CommitteeAcc is used for retrieving the committee address and the verification script. - CommitteeAcc *wallet.Account - // ConsensusAcc is used for retrieving the committee address and the verification script. - ConsensusAcc *wallet.Account - Wallets []*wallet.Wallet - // ContractWallet is a wallet for providing the contract group signature. - ContractWallet *wallet.Wallet - // Accounts contains simple signature accounts in the same order as in Wallets. - Accounts []*wallet.Account - Contracts map[string]*ContractState - Command *cobra.Command - ContractPath string - ContractURL string -} - -func (cs *ContractState) Parse() error { - nf, err := nef.FileFromBytes(cs.RawNEF) - if err != nil { - return fmt.Errorf("can't parse NEF file: %w", err) - } - - m := new(manifest.Manifest) - if err := json.Unmarshal(cs.RawManifest, m); err != nil { - return fmt.Errorf("can't parse manifest file: %w", err) - } - - cs.NEF = &nf - cs.Manifest = m - return nil -} - -func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContext, error) { - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) - wallets, err := GetAlphabetWallets(v, walletDir) - if err != nil { - return nil, err - } - - needContracts := cmd.Name() == "update-contracts" || cmd.Name() == "init" - - var w *wallet.Wallet - w, err = getWallet(cmd, v, needContracts, walletDir) - if err != nil { - return nil, err - } - - c, err := createClient(cmd, v, wallets) - if err != nil { - return nil, err - } - - committeeAcc, err := GetWalletAccount(wallets[0], constants.CommitteeAccountName) - if err != nil { - return nil, fmt.Errorf("can't find committee account: %w", err) - } - - consensusAcc, err := GetWalletAccount(wallets[0], constants.ConsensusAccountName) - if err != nil { - return nil, fmt.Errorf("can't find consensus account: %w", err) - } - - if err := validateInit(cmd); err != nil { - return nil, err - } - - ctrPath, err := getContractsPath(cmd, needContracts) - if err != nil { - return nil, err - } - - var ctrURL string - if needContracts { - ctrURL, _ = cmd.Flags().GetString(commonflags.ContractsURLFlag) - } - - if err := CheckNotaryEnabled(c); err != nil { - return nil, err - } - - accounts, err := 
getSingleAccounts(wallets) - if err != nil { - return nil, err - } - - cliCtx, err := defaultClientContext(c, committeeAcc) - if err != nil { - return nil, fmt.Errorf("client context: %w", err) - } - - initCtx := &InitializeContext{ - ClientContext: *cliCtx, - ConsensusAcc: consensusAcc, - CommitteeAcc: committeeAcc, - ContractWallet: w, - Wallets: wallets, - Accounts: accounts, - Command: cmd, - Contracts: make(map[string]*ContractState), - ContractPath: ctrPath, - ContractURL: ctrURL, - } - - if needContracts { - err := readContracts(initCtx, constants.FullContractList) - if err != nil { - return nil, err - } - } - - return initCtx, nil -} - -func validateInit(cmd *cobra.Command) error { - if cmd.Name() != "init" { - return nil - } - if viper.GetInt64(commonflags.EpochDurationInitFlag) <= 0 { - return errNegativeDuration - } - - if viper.GetInt64(commonflags.MaxObjectSizeInitFlag) <= 0 { - return errNegativeSize - } - - return nil -} - -func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (Client, error) { - var c Client - var err error - if ldf := cmd.Flags().Lookup(commonflags.LocalDumpFlag); ldf != nil && ldf.Changed { - if cmd.Flags().Changed(commonflags.EndpointFlag) { - return nil, fmt.Errorf("`%s` and `%s` flags are mutually exclusive", commonflags.EndpointFlag, commonflags.LocalDumpFlag) - } - c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String()) - } else { - c, err = NewRemoteClient(v) - } - if err != nil { - return nil, fmt.Errorf("can't create N3 client: %w", err) - } - return c, nil -} - -func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) { - if !needContracts { - return "", nil - } - - ctrPath, err := cmd.Flags().GetString(commonflags.ContractsInitFlag) - if err != nil { - return "", fmt.Errorf("invalid contracts path: %w", err) - } - return ctrPath, nil -} - -func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { - accounts := make([]*wallet.Account, len(wallets)) - for i, w := range wallets { - acc, err := GetWalletAccount(w, constants.SingleAccountName) - if err != nil { - return nil, fmt.Errorf("wallet %s is invalid (no single account): %w", w.Path(), err) - } - accounts[i] = acc - } - return accounts, nil -} - -func readContracts(c *InitializeContext, names []string) error { - var ( - fi os.FileInfo - err error - ) - if c.ContractPath != "" { - fi, err = os.Stat(c.ContractPath) - if err != nil { - return fmt.Errorf("invalid contracts path: %w", err) - } - } - - if c.ContractPath != "" && fi.IsDir() { - for _, ctrName := range names { - cs, err := ReadContract(filepath.Join(c.ContractPath, ctrName), ctrName) - if err != nil { - return err - } - c.Contracts[ctrName] = cs - } - } else { - var r io2.ReadCloser - if c.ContractPath != "" { - r, err = os.Open(c.ContractPath) - } else if c.ContractURL != "" { - r, err = downloadContracts(c.Command, c.ContractURL) - } else { - r, err = downloadContractsFromRepository(c.Command) - } - if err != nil { - return fmt.Errorf("can't open contracts archive: %w", err) - } - defer r.Close() - - m, err := readContractsFromArchive(r, names) - if err != nil { - return err - } - for _, name := range names { - if err := m[name].Parse(); err != nil { - return err - } - c.Contracts[name] = m[name] - } - } - - for _, ctrName := range names { - if ctrName != constants.AlphabetContract { - cs := c.Contracts[ctrName] - cs.Hash = state.CreateContractHash(c.CommitteeAcc.Contract.ScriptHash(), - cs.NEF.Checksum, cs.Manifest.Name) - } - } - return nil -} - -func (c 
*InitializeContext) Close() {
-	if local, ok := c.Client.(*LocalClient); ok {
-		err := local.Dump()
-		if err != nil {
-			c.Command.PrintErrf("Can't write dump: %v\n", err)
-			os.Exit(1)
-		}
-	}
-}
-
-func (c *InitializeContext) AwaitTx() error {
-	return c.ClientContext.AwaitTx(c.Command)
-}
-
-func (c *InitializeContext) NNSContractState() (*state.Contract, error) {
-	if c.NNSCs != nil {
-		return c.NNSCs, nil
-	}
-
-	r := management.NewReader(c.ReadOnlyInvoker)
-	cs, err := r.GetContractByID(1)
-	if err != nil {
-		return nil, err
-	}
-
-	c.NNSCs = cs
-	return cs, nil
-}
-
-func (c *InitializeContext) GetSigner(tryGroup bool, acc *wallet.Account) transaction.Signer {
-	if tryGroup && c.GroupKey != nil {
-		return transaction.Signer{
-			Account:       acc.Contract.ScriptHash(),
-			Scopes:        transaction.CustomGroups,
-			AllowedGroups: keys.PublicKeys{c.GroupKey},
-		}
-	}
-
-	signer := transaction.Signer{
-		Account: acc.Contract.ScriptHash(),
-		Scopes:  transaction.Global, // Scope is important, as we have a nested call to the container contract.
-	}
-
-	if !tryGroup {
-		return signer
-	}
-
-	nnsCs, err := c.NNSContractState()
-	if err != nil {
-		return signer
-	}
-
-	groupKey, err := NNSResolveKey(c.ReadOnlyInvoker, nnsCs.Hash, client.NNSGroupKeyName)
-	if err == nil {
-		c.GroupKey = groupKey
-
-		signer.Scopes = transaction.CustomGroups
-		signer.AllowedGroups = keys.PublicKeys{groupKey}
-	}
-	return signer
-}
-
-// SendCommitteeTx creates a transaction from the script, signs it by committee nodes and sends it to RPC.
-// If tryGroup is false, the global scope is used for the signer (useful when
-// working with native contracts).
-func (c *InitializeContext) SendCommitteeTx(script []byte, tryGroup bool) error {
-	return c.sendMultiTx(script, tryGroup, false)
-}
-
-// SendConsensusTx creates a transaction from the script, signs it by alphabet nodes and sends it to RPC.
-// Note that because this is used only after the contracts were initialized and deployed,
-// we always try to have a group scope.
-func (c *InitializeContext) SendConsensusTx(script []byte) error {
-	return c.sendMultiTx(script, true, true)
-}
-
-func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsensus bool) error {
-	var act *actor.Actor
-	var err error
-
-	withConsensus = withConsensus && !c.ConsensusAcc.Contract.ScriptHash().Equals(c.CommitteeAcc.ScriptHash())
-	if tryGroup {
-		// Even for consensus signatures we need the committee to pay.
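-		// (The committee signer comes first, so it acts as the transaction
-		// sender and pays the fees; the consensus multisig is appended below
-		// only when it differs from the committee account.)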
- signers := make([]actor.SignerAccount, 1, 2) - signers[0] = actor.SignerAccount{ - Signer: c.GetSigner(tryGroup, c.CommitteeAcc), - Account: c.CommitteeAcc, - } - if withConsensus { - signers = append(signers, actor.SignerAccount{ - Signer: c.GetSigner(tryGroup, c.ConsensusAcc), - Account: c.ConsensusAcc, - }) - } - act, err = actor.New(c.Client, signers) - } else { - assert.False(withConsensus, "BUG: should never happen") - act, err = c.CommitteeAct, nil - } - if err != nil { - return fmt.Errorf("could not create actor: %w", err) - } - - tx, err := act.MakeUnsignedRun(script, []transaction.Attribute{{Type: transaction.HighPriority}}) - if err != nil { - return fmt.Errorf("could not perform test invocation: %w", err) - } - - if err := c.MultiSign(tx, constants.CommitteeAccountName); err != nil { - return err - } - if withConsensus { - if err := c.MultiSign(tx, constants.ConsensusAccountName); err != nil { - return err - } - } - - return c.SendTx(tx, c.Command, false) -} - -func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accType string) error { - if err := c.MultiSign(tx, accType); err != nil { - return err - } - - return c.SendTx(tx, c.Command, false) -} - -func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { - version, err := c.Client.GetVersion() - // error appears only if client - // has not been initialized - assert.NoError(err) - network := version.Protocol.Network - - // Use parameter context to avoid dealing with signature order. - pc := context.NewParameterContext("", network, tx) - h := c.CommitteeAcc.Contract.ScriptHash() - if accType == constants.ConsensusAccountName { - h = c.ConsensusAcc.Contract.ScriptHash() - } - for _, w := range c.Wallets { - acc, err := GetWalletAccount(w, accType) - if err != nil { - return fmt.Errorf("can't find %s wallet account: %w", accType, err) - } - - priv := acc.PrivateKey() - sign := priv.SignHashable(uint32(network), tx) - if err := pc.AddSignature(h, acc.Contract, priv.PublicKey(), sign); err != nil { - return fmt.Errorf("can't add signature: %w", err) - } - if len(pc.Items[h].Signatures) == len(acc.Contract.Parameters) { - break - } - } - - w, err := pc.GetWitness(h) - if err != nil { - return fmt.Errorf("incomplete signature: %w", err) - } - - for i := range tx.Signers { - if tx.Signers[i].Account == h { - assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") - if i < len(tx.Scripts) { - tx.Scripts[i] = *w - } - if i == len(tx.Scripts) { - tx.Scripts = append(tx.Scripts, *w) - } - return nil - } - } - - return fmt.Errorf("%s account was not found among transaction signers", accType) -} - -// EmitUpdateNNSGroupScript emits script for updating group key stored in NNS. -// First return value is true iff the key is already there and nothing should be done. -// Second return value is true iff a domain registration code was emitted. 
-func (c *InitializeContext) EmitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHash util.Uint160, pub *keys.PublicKey) (bool, bool, error) { - isAvail, err := NNSIsAvailable(c.Client, nnsHash, client.NNSGroupKeyName) - if err != nil { - return false, false, err - } - - if !isAvail { - currentPub, err := NNSResolveKey(c.ReadOnlyInvoker, nnsHash, client.NNSGroupKeyName) - if err != nil { - return false, false, err - } - - if pub.Equal(currentPub) { - return true, false, nil - } - } - - if isAvail { - emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All, - client.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(), - constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal, - int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - } - - emit.AppCall(bw.BinWriter, nnsHash, "deleteRecords", callflag.All, "group.frostfs", int64(nns.TXT)) - emit.AppCall(bw.BinWriter, nnsHash, "addRecord", callflag.All, - "group.frostfs", int64(nns.TXT), hex.EncodeToString(pub.Bytes())) - - return false, isAvail, nil -} - -func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.Uint160, domain string) ([]byte, bool, error) { - ok, err := NNSIsAvailable(c.Client, nnsHash, domain) - if err != nil { - return nil, false, err - } - - if ok { - bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All, - domain, c.CommitteeAcc.Contract.ScriptHash(), - constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal, - int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - - assert.NoError(bw.Err) - return bw.Bytes(), false, nil - } - - s, err := NNSResolveHash(c.ReadOnlyInvoker, nnsHash, domain) - if err != nil { - return nil, false, err - } - return nil, s == expectedHash, nil -} - -func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) - return !avail, err -} - -func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { - r := management.NewReader(c.ReadOnlyInvoker) - realCs, err := r.GetContract(ctrHash) - return err == nil && realCs != nil && realCs.NEF.Checksum == cs.NEF.Checksum -} - -func (c *InitializeContext) GetContract(ctrName string) *ContractState { - return c.Contracts[ctrName] -} - -func (c *InitializeContext) GetAlphabetDeployItems(i, n int) []any { - items := make([]any, 5) - items[0] = c.Contracts[constants.NetmapContract].Hash - items[1] = c.Contracts[constants.ProxyContract].Hash - items[2] = innerring.GlagoliticLetter(i).String() - items[3] = int64(i) - items[4] = int64(n) - return items -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go deleted file mode 100644 index f3ce42f51..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package helper - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestNextPollInterval(t *testing.T) { - var pollInterval time.Duration - var iteration int - - pollInterval, hasChanged := NextPollInterval(iteration, pollInterval) - require.True(t, hasChanged) - require.Equal(t, time.Second, pollInterval) - - iteration = 4 - pollInterval, hasChanged = NextPollInterval(iteration, pollInterval) - require.False(t, hasChanged) - 
require.Equal(t, time.Second, pollInterval) - - iteration = 5 - pollInterval, hasChanged = NextPollInterval(iteration, pollInterval) - require.True(t, hasChanged) - require.Equal(t, 2*time.Second, pollInterval) - - iteration = 10 - pollInterval, hasChanged = NextPollInterval(iteration, pollInterval) - require.True(t, hasChanged) - require.Equal(t, 4*time.Second, pollInterval) - - iteration = 20 - pollInterval = 32 * time.Second - pollInterval, hasChanged = NextPollInterval(iteration, pollInterval) - require.True(t, hasChanged) // from 32s to 16s - require.Equal(t, 16*time.Second, pollInterval) - - pollInterval = 16 * time.Second - pollInterval, hasChanged = NextPollInterval(iteration, pollInterval) - require.False(t, hasChanged) - require.Equal(t, 16*time.Second, pollInterval) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go deleted file mode 100644 index 46611c177..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ /dev/null @@ -1,406 +0,0 @@ -package helper - -import ( - "crypto/elliptic" - "errors" - "fmt" - "os" - "sort" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/config" - "github.com/nspcc-dev/neo-go/pkg/core" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/chaindump" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/storage" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -type LocalClient struct { - bc *core.Blockchain - transactions []*transaction.Transaction - dumpPath string - accounts []*wallet.Account -} - -func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) { - cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath)) - if err != nil { - return nil, err - } - - bc, err := core.NewBlockchain(storage.NewMemoryStore(), cfg.Blockchain(), zap.NewNop()) - if err != nil { - return nil, err - } - - go bc.Run() - - accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets) - if err != nil { - return nil, err - } - - if cmd.Name() != "init" { - if err := restoreDump(bc, dumpPath); err != nil { - return nil, fmt.Errorf("restore dump: %w", err) - } - } - - return &LocalClient{ - bc: bc, - dumpPath: dumpPath, - 
accounts: accounts,
-	}, nil
-}
-
-func restoreDump(bc *core.Blockchain, dumpPath string) error {
-	f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
-	if err != nil {
-		return fmt.Errorf("can't open local dump: %w", err)
-	}
-	defer f.Close()
-
-	r := io.NewBinReaderFromIO(f)
-
-	var skip uint32
-	if bc.BlockHeight() != 0 {
-		skip = bc.BlockHeight() + 1
-	}
-
-	count := r.ReadU32LE() - skip
-	if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
-	accounts := make([]*wallet.Account, len(wallets))
-	for i := range accounts {
-		acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
-		if err != nil {
-			return nil, err
-		}
-		accounts[i] = acc
-	}
-
-	indexMap := make(map[string]int)
-	for i, pub := range cfg.StandbyCommittee {
-		indexMap[pub] = i
-	}
-
-	sort.Slice(accounts, func(i, j int) bool {
-		pi := accounts[i].PrivateKey().PublicKey().Bytes()
-		pj := accounts[j].PrivateKey().PublicKey().Bytes()
-		return indexMap[string(pi)] < indexMap[string(pj)]
-	})
-	sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
-		return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
-	})
-
-	m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
-	return accounts[:m], nil
-}
-
-func (l *LocalClient) GetBlockCount() (uint32, error) {
-	return l.bc.BlockHeight(), nil
-}
-
-func (l *LocalClient) GetNativeContracts() ([]state.Contract, error) {
-	return l.bc.GetNatives(), nil
-}
-
-func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*result.ApplicationLog, error) {
-	aer, err := l.bc.GetAppExecResults(h, *t)
-	if err != nil {
-		return nil, err
-	}
-
-	a := result.NewApplicationLog(h, aer, *t)
-	return &a, nil
-}
-
-// InvokeFunction is implemented via `InvokeScript`.
-func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
-	var err error
-
-	pp := make([]any, len(sPrm))
-	for i, p := range sPrm {
-		pp[i], err = smartcontract.ExpandParameterToEmitable(p)
-		if err != nil {
-			return nil, fmt.Errorf("incorrect parameter type %s: %w", p.Type, err)
-		}
-	}
-
-	return InvokeFunction(l, h, method, pp, ss)
-}
-
-func (l *LocalClient) TerminateSession(_ uuid.UUID) (bool, error) {
-	// not used by `morph init` command
-	panic("unexpected call")
-}
-
-func (l *LocalClient) TraverseIterator(_, _ uuid.UUID, _ int) ([]stackitem.Item, error) {
-	// not used by `morph init` command
-	panic("unexpected call")
-}
-
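A small self-contained aside (not from the deleted sources): `getBlockSigningAccounts` above keeps only as many consensus accounts as dBFT needs for a valid block multisignature. Assuming neo-go's `smartcontract.GetDefaultHonestNodeCount` computes `M = n - (n-1)/3` (tolerating `f = (n-1)/3` faulty validators), the threshold behaves as follows.

```go
package main

import "fmt"

// defaultHonestNodeCount reproduces the assumed M = n - (n-1)/3 dBFT threshold.
func defaultHonestNodeCount(n int) int { return n - (n-1)/3 }

func main() {
	for _, n := range []int{1, 4, 7} {
		// n validators tolerate f = (n-1)/3 faults, so M = n - f signatures
		// are required on every block.
		fmt.Printf("validators=%d -> signing accounts kept=%d\n", n, defaultHonestNodeCount(n))
	}
	// validators=1 -> 1, validators=4 -> 3, validators=7 -> 5
}
```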
-// GetVersion returns the default version.
-func (l *LocalClient) GetVersion() (*result.Version, error) {
-	c := l.bc.GetConfig()
-	return &result.Version{
-		Protocol: result.Protocol{
-			AddressVersion:              address.NEO3Prefix,
-			Network:                     c.Magic,
-			MillisecondsPerBlock:        int(c.TimePerBlock / time.Millisecond),
-			MaxTraceableBlocks:          c.MaxTraceableBlocks,
-			MaxValidUntilBlockIncrement: c.MaxValidUntilBlockIncrement,
-			MaxTransactionsPerBlock:     c.MaxTransactionsPerBlock,
-			MemoryPoolMaxTransactions:   c.MemPoolSize,
-			ValidatorsCount:             byte(c.ValidatorsCount),
-			InitialGasDistribution:      c.InitialGASSupply,
-			CommitteeHistory:            c.CommitteeHistory,
-			P2PSigExtensions:            c.P2PSigExtensions,
-			StateRootInHeader:           c.StateRootInHeader,
-			ValidatorsHistory:           c.ValidatorsHistory,
-		},
-	}, nil
-}
-
-func (l *LocalClient) InvokeContractVerify(util.Uint160, []smartcontract.Parameter, []transaction.Signer, ...transaction.Witness) (*result.Invoke, error) {
-	// not used by `morph init` command
-	panic("unexpected call")
-}
-
-// CalculateNetworkFee calculates the network fee for the given transaction.
-// Copied from neo-go with minor corrections (no need to support non-notary mode):
-// https://github.com/nspcc-dev/neo-go/blob/v0.103.0/pkg/services/rpcsrv/server.go#L911
-func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
-	// Avoid setting hash for this tx: server code doesn't touch the client transaction.
-	data := tx.Bytes()
-	tx, err := transaction.NewTransactionFromBytes(data)
-	if err != nil {
-		return 0, err
-	}
-
-	hashablePart, err := tx.EncodeHashableFields()
-	if err != nil {
-		return 0, err
-	}
-	size := len(hashablePart) + io.GetVarSize(len(tx.Signers))
-	var (
-		netFee int64
-		// Verification GAS cost can't exceed this policy.
-		gasLimit = l.bc.GetMaxVerificationGAS()
-	)
-	for i, signer := range tx.Signers {
-		w := tx.Scripts[i]
-		if len(w.InvocationScript) == 0 { // No invocation provided, try to infer one.
-			var paramz []manifest.Parameter
-			if len(w.VerificationScript) == 0 { // Contract-based verification
-				cs := l.bc.GetContractState(signer.Account)
-				if cs == nil {
-					return 0, fmt.Errorf("signer %d has no verification script and no deployed contract", i)
-				}
-				md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
-				if md == nil || md.ReturnType != smartcontract.BoolType {
-					return 0, fmt.Errorf("signer %d has no verify method in deployed contract", i)
-				}
-				paramz = md.Parameters // It's OK for the verify method to have no parameters.
-			} else { // Regular signature verification.
- if vm.IsSignatureContract(w.VerificationScript) { - paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}} - } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok { - paramz = make([]manifest.Parameter, nSigs) - for j := range nSigs { - paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType} - } - } - } - inv := io.NewBufBinWriter() - for _, p := range paramz { - p.Type.EncodeDefaultValue(inv.BinWriter) - } - if inv.Err != nil { - return 0, fmt.Errorf("failed to create dummy invocation script (signer %d): %s", i, inv.Err.Error()) - } - w.InvocationScript = inv.Bytes() - } - gasConsumed, err := l.bc.VerifyWitness(signer.Account, tx, &w, gasLimit) - if err != nil && !errors.Is(err, core.ErrInvalidSignature) { - return 0, err - } - gasLimit -= gasConsumed - netFee += gasConsumed - size += io.GetVarSize(w.VerificationScript) + io.GetVarSize(w.InvocationScript) - } - if l.bc.P2PSigExtensionsEnabled() { - attrs := tx.GetAttributes(transaction.NotaryAssistedT) - if len(attrs) != 0 { - na := attrs[0].Value.(*transaction.NotaryAssisted) - netFee += (int64(na.NKeys) + 1) * l.bc.GetNotaryServiceFeePerKey() - } - } - fee := l.bc.FeePerByte() - netFee += int64(size) * fee - return netFee, nil -} - -func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer) (*result.Invoke, error) { - lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - if err != nil { - return nil, err - } - - tx := transaction.New(script, 0) - tx.Signers = signers - tx.ValidUntilBlock = l.bc.BlockHeight() + 2 - - ic, err := l.bc.GetTestVM(trigger.Application, tx, &block.Block{ - Header: block.Header{ - Index: lastBlock.Index + 1, - Timestamp: lastBlock.Timestamp + 1, - }, - }) - if err != nil { - return nil, fmt.Errorf("get test VM: %w", err) - } - - ic.VM.GasLimit = 100_0000_0000 - ic.VM.LoadScriptWithFlags(script, callflag.All) - - var errStr string - if err := ic.VM.Run(); err != nil { - errStr = err.Error() - } - return &result.Invoke{ - State: ic.VM.State().String(), - GasConsumed: ic.VM.GasConsumed(), - Script: script, - Stack: ic.VM.Estack().ToArray(), - FaultException: errStr, - }, nil -} - -func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) { - tx = tx.Copy() - l.transactions = append(l.transactions, tx) - return tx.Hash(), nil -} - -func (l *LocalClient) putTransactions() error { - // 1. Prepare new block. - lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - assert.NoError(err) - defer func() { l.transactions = l.transactions[:0] }() - - b := &block.Block{ - Header: block.Header{ - NextConsensus: l.accounts[0].Contract.ScriptHash(), - Script: transaction.Witness{ - VerificationScript: l.accounts[0].Contract.Script, - }, - Timestamp: lastBlock.Timestamp + 1, - }, - Transactions: l.transactions, - } - - if l.bc.GetConfig().StateRootInHeader { - b.StateRootEnabled = true - b.PrevStateRoot = l.bc.GetStateModule().CurrentLocalStateRoot() - } - b.PrevHash = lastBlock.Hash() - b.Index = lastBlock.Index + 1 - b.RebuildMerkleRoot() - - // 2. Sign prepared block. - var invocationScript []byte - - magic := l.bc.GetConfig().Magic - for _, acc := range l.accounts { - sign := acc.PrivateKey().SignHashable(uint32(magic), b) - invocationScript = append(invocationScript, byte(opcode.PUSHDATA1), 64) - invocationScript = append(invocationScript, sign...) - } - b.Script.InvocationScript = invocationScript - - // 3. Persist block. 
- return l.bc.AddBlock(b) -} - -func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, signers []transaction.Signer) (*result.Invoke, error) { - w := io.NewBufBinWriter() - emit.Array(w.BinWriter, parameters...) - emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) - return c.InvokeScript(w.Bytes(), signers) -} - -var errGetDesignatedByRoleResponse = errors.New("`getDesignatedByRole`: invalid response") - -func GetDesignatedByRole(inv *invoker.Invoker, h util.Uint160, role noderoles.Role, u uint32) (keys.PublicKeys, error) { - arr, err := unwrap.Array(inv.Call(h, "getDesignatedByRole", int64(role), int64(u))) - if err != nil { - return nil, errGetDesignatedByRoleResponse - } - - pubs := make(keys.PublicKeys, len(arr)) - for i := range arr { - bs, err := arr[i].TryBytes() - if err != nil { - return nil, errGetDesignatedByRoleResponse - } - pubs[i], err = keys.NewPublicKeyFromBytes(bs, elliptic.P256()) - if err != nil { - return nil, errGetDesignatedByRoleResponse - } - } - - return pubs, nil -} - -func (l *LocalClient) Dump() (err error) { - defer l.bc.Close() - - f, err := os.Create(l.dumpPath) - if err != nil { - return err - } - defer func() { - closeErr := f.Close() - if err == nil && closeErr != nil { - err = closeErr - } - }() - - w := io.NewBinWriterFromIO(f) - w.WriteU32LE(l.bc.BlockHeight() + 1) - err = chaindump.Dump(l.bc, w, 0, l.bc.BlockHeight()+1) - return -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go deleted file mode 100644 index 3f3a66cb6..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ /dev/null @@ -1,137 +0,0 @@ -package helper - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// Client represents N3 client interface capable of test-invoking scripts -// and sending signed transactions to chain. 
-type Client interface { - actor.RPCActor - - GetNativeContracts() ([]state.Contract, error) - GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) -} - -type HashVUBPair struct { - Hash util.Uint256 - Vub uint32 -} - -type ClientContext struct { - Client Client // a raw neo-go client OR a local chain implementation - CommitteeAct *actor.Actor // committee actor with the Global witness scope - ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer - SentTxs []HashVUBPair -} - -func NewRemoteClient(v *viper.Viper) (Client, error) { - // number of opened connections - // by neo-go client per one host - const ( - maxConnsPerHost = 10 - requestTimeout = time.Second * 10 - ) - - ctx := context.Background() - endpoint := v.GetString(commonflags.EndpointFlag) - if endpoint == "" { - return nil, errors.New("missing endpoint") - } - - var cfg *tls.Config - if rootCAs := v.GetStringSlice("tls.trusted_ca_list"); len(rootCAs) != 0 { - certFile := v.GetString("tls.certificate") - keyFile := v.GetString("tls.key") - - tlsConfig, err := rpcclient.TLSClientConfig(rootCAs, certFile, keyFile) - if err != nil { - return nil, err - } - - cfg = tlsConfig - } - c, err := rpcclient.New(ctx, endpoint, rpcclient.Options{ - MaxConnsPerHost: maxConnsPerHost, - RequestTimeout: requestTimeout, - TLSClientConfig: cfg, - }) - if err != nil { - return nil, err - } - if err := c.Init(); err != nil { - return nil, err - } - return c, nil -} - -func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { - commAct, err := actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: committeeAcc.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: committeeAcc, - }}) - if err != nil { - return nil, err - } - - return &ClientContext{ - Client: c, - CommitteeAct: commAct, - ReadOnlyInvoker: invoker.New(c, nil), - }, nil -} - -func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command, await bool) error { - h, err := c.Client.SendRawTransaction(tx) - if err != nil { - return err - } - - if h != tx.Hash() { - return fmt.Errorf("sent and actual tx hashes mismatch:\n\tsent: %v\n\tactual: %v", tx.Hash().StringLE(), h.StringLE()) - } - - c.SentTxs = append(c.SentTxs, HashVUBPair{Hash: h, Vub: tx.ValidUntilBlock}) - - if await { - return c.AwaitTx(cmd) - } - return nil -} - -func (c *ClientContext) AwaitTx(cmd *cobra.Command) error { - if len(c.SentTxs) == 0 { - return nil - } - - if local, ok := c.Client.(*LocalClient); ok { - if err := local.putTransactions(); err != nil { - return fmt.Errorf("can't persist transactions: %w", err) - } - } - - err := AwaitTx(cmd, c.Client, c.SentTxs) - c.SentTxs = c.SentTxs[:0] - - return err -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go deleted file mode 100644 index 20abaff0a..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ /dev/null @@ -1,127 +0,0 @@ -package helper - -import ( - "errors" - "fmt" - "slices" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - 
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/spf13/viper" -) - -var NetmapConfigKeys = []string{ - netmap.EpochDurationConfig, - netmap.MaxObjectSizeConfig, - netmap.ContainerFeeConfig, - netmap.ContainerAliasFeeConfig, - netmap.IrCandidateFeeConfig, - netmap.WithdrawFeeConfig, - netmap.HomomorphicHashingDisabledKey, - netmap.MaintenanceModeAllowedConfig, -} - -var errFailedToFetchListOfNetworkKeys = errors.New("can't fetch list of network config keys from the netmap contract") - -func GetDefaultNetmapContractConfigMap() map[string]any { - m := make(map[string]any) - m[netmap.EpochDurationConfig] = viper.GetInt64(commonflags.EpochDurationInitFlag) - m[netmap.MaxObjectSizeConfig] = viper.GetInt64(commonflags.MaxObjectSizeInitFlag) - m[netmap.MaxECDataCountConfig] = viper.GetInt64(commonflags.MaxECDataCountFlag) - m[netmap.MaxECParityCountConfig] = viper.GetInt64(commonflags.MaxECParityCounFlag) - m[netmap.ContainerFeeConfig] = viper.GetInt64(commonflags.ContainerFeeInitFlag) - m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(commonflags.ContainerAliasFeeInitFlag) - m[netmap.IrCandidateFeeConfig] = viper.GetInt64(commonflags.CandidateFeeInitFlag) - m[netmap.WithdrawFeeConfig] = viper.GetInt64(commonflags.WithdrawFeeInitFlag) - m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(commonflags.HomomorphicHashDisabledInitFlag) - m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(commonflags.MaintenanceModeAllowedInitFlag) - return m -} - -func ParseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) { - m := make(map[string][]byte, len(arr)) - for _, param := range arr { - tuple, ok := param.Value().([]stackitem.Item) - if !ok || len(tuple) != 2 { - return nil, errors.New("invalid ListConfig response from netmap contract") - } - - k, err := tuple[0].TryBytes() - if err != nil { - return nil, errors.New("invalid config key from netmap contract") - } - - v, err := tuple[1].TryBytes() - if err != nil { - return nil, InvalidConfigValueErr(string(k)) - } - m[string(k)] = v - } - return m, nil -} - -func InvalidConfigValueErr(key string) error { - return fmt.Errorf("invalid %s config value from netmap contract", key) -} - -func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error { - if countEpoch <= 0 { - return errors.New("number of epochs cannot be less than 1") - } - - curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch")) - if err != nil { - return errors.New("can't fetch current epoch from the netmap contract") - } - - newEpoch := curr + countEpoch - wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch) - - // In NeoFS this is done via Notary contract. Here, however, we can form the - // transaction locally. 
- emit.AppCall(bw.BinWriter, nmHash, "newEpoch", callflag.All, newEpoch) - return bw.Err -} - -func GetNetConfigFromNetmapContract(roInvoker *invoker.Invoker) ([]stackitem.Item, error) { - r := management.NewReader(roInvoker) - cs, err := GetContractByID(r, 1) - if err != nil { - return nil, fmt.Errorf("get nns contract: %w", err) - } - nmHash, err := NNSResolveHash(roInvoker, cs.Hash, DomainOf(constants.NetmapContract)) - if err != nil { - return nil, fmt.Errorf("can't get netmap contract hash: %w", err) - } - arr, err := unwrap.Array(roInvoker.Call(nmHash, "listConfig")) - if err != nil { - return nil, errFailedToFetchListOfNetworkKeys - } - return arr, err -} - -func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { - arr, err := GetNetConfigFromNetmapContract(roInvoker) - if err != nil { - return err - } - m, err := ParseConfigFromNetmapContract(arr) - if err != nil { - return err - } - for k, v := range m { - if slices.Contains(NetmapConfigKeys, k) { - md[k] = v - } - } - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go deleted file mode 100644 index be6b2c6dd..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ /dev/null @@ -1,210 +0,0 @@ -package helper - -import ( - "archive/tar" - "compress/gzip" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/viper" -) - -func getRegularWallet(walletPath string) (*wallet.Wallet, error) { - w, err := wallet.NewWalletFromFile(walletPath) - if err != nil { - return nil, err - } - - password, err := input.ReadPassword("Enter password for wallet:") - if err != nil { - return nil, fmt.Errorf("can't fetch password: %w", err) - } - - for i := range w.Accounts { - if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { - err = fmt.Errorf("can't unlock wallet: %w", err) - break - } - } - - return w, err -} - -func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { - wallets, err := openAlphabetWallets(v, walletDir) - if err != nil { - return nil, err - } - - if len(wallets) > constants.MaxAlphabetNodes { - return nil, ErrTooManyAlphabetNodes - } - return wallets, nil -} - -func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { - walletFiles, err := os.ReadDir(walletDir) - if err != nil { - return nil, fmt.Errorf("can't read alphabet wallets dir: %w", err) - } - - var wallets []*wallet.Wallet - var letter string - for i := range constants.MaxAlphabetNodes { - letter = innerring.GlagoliticLetter(i).String() - p := filepath.Join(walletDir, letter+".json") - var w *wallet.Wallet - w, err = wallet.NewWalletFromFile(p) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - err = nil - } else { - err = fmt.Errorf("can't open alphabet wallet: %w", err) - } - break - } - - var password string - password, err = config.GetPassword(v, letter) - if err != nil { - err = 
fmt.Errorf("can't fetch password: %w", err) - break - } - - for i := range w.Accounts { - if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { - err = fmt.Errorf("can't unlock wallet: %w", err) - break - } - } - - wallets = append(wallets, w) - } - if err != nil { - return nil, fmt.Errorf("can't read wallet for letter '%s': %w", letter, err) - } - if len(wallets) == 0 { - err = errors.New("there are no alphabet wallets in dir (run `generate-alphabet` command first)") - if len(walletFiles) > 0 { - err = fmt.Errorf("use glagolitic names for wallets(run `print-alphabet`): %w", err) - } - return nil, err - } - return wallets, nil -} - -func ReadContract(ctrPath, ctrName string) (*ContractState, error) { - rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef")) - if err != nil { - return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err) - } - rawManif, err := os.ReadFile(filepath.Join(ctrPath, "config.json")) - if err != nil { - return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err) - } - - cs := &ContractState{ - RawNEF: rawNef, - RawManifest: rawManif, - } - - return cs, cs.Parse() -} - -func readContractsFromArchive(file io.Reader, names []string) (map[string]*ContractState, error) { - m := make(map[string]*ContractState, len(names)) - for i := range names { - m[names[i]] = new(ContractState) - } - - gr, err := gzip.NewReader(file) - if err != nil { - return nil, fmt.Errorf("contracts file must be tar.gz archive: %w", err) - } - - r := tar.NewReader(gr) - var h *tar.Header - for h, err = r.Next(); err == nil && h != nil; h, err = r.Next() { - if h.Typeflag != tar.TypeReg { - continue - } - dir, _ := filepath.Split(h.Name) - ctrName := filepath.Base(dir) - - cs, ok := m[ctrName] - if !ok { - continue - } - - switch { - case strings.HasSuffix(h.Name, filepath.Join(ctrName, ctrName+"_contract.nef")): - cs.RawNEF, err = io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err) - } - case strings.HasSuffix(h.Name, "config.json"): - cs.RawManifest, err = io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err) - } - } - m[ctrName] = cs - } - if err != nil && err != io.EOF { - return nil, fmt.Errorf("can't read contracts from archive: %w", err) - } - - for ctrName, cs := range m { - if cs.RawNEF == nil { - return nil, fmt.Errorf("NEF for %s contract wasn't found", ctrName) - } - if cs.RawManifest == nil { - return nil, fmt.Errorf("manifest for %s contract wasn't found", ctrName) - } - } - return m, nil -} - -func GetAlphabetNNSDomain(i int) string { - return constants.AlphabetContract + strconv.FormatUint(uint64(i), 10) + ".frostfs" -} - -func ParseGASAmount(s string) (fixedn.Fixed8, error) { - gasAmount, err := fixedn.Fixed8FromString(s) - if err != nil { - return 0, fmt.Errorf("invalid GAS amount %s: %w", s, err) - } - if gasAmount <= 0 { - return 0, fmt.Errorf("GAS amount must be positive (got %d)", gasAmount) - } - return gasAmount, nil -} - -// GetContractByID retrieves a contract by its ID using the standard GetContractByID method. -// However, if the returned state.Contract is nil, it returns an error indicating that the contract was not found. 
-// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1210 -func GetContractByID(r *management.ContractReader, id int32) (*state.Contract, error) { - cs, err := r.GetContractByID(id) - if err != nil { - return nil, err - } - - if cs == nil { - return nil, errors.New("contract not found") - } - return cs, nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go b/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go deleted file mode 100644 index bd01cd59e..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go +++ /dev/null @@ -1,76 +0,0 @@ -package helper - -import ( - "fmt" - "os" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func InitializeContractWallet(v *viper.Viper, walletDir string) (*wallet.Wallet, error) { - password, err := config.GetPassword(v, constants.ContractWalletPasswordKey) - if err != nil { - return nil, err - } - - w, err := wallet.NewWallet(filepath.Join(walletDir, constants.ContractWalletFilename)) - if err != nil { - return nil, err - } - - acc, err := wallet.NewAccount() - if err != nil { - return nil, err - } - - err = acc.Encrypt(password, keys.NEP2ScryptParams()) - if err != nil { - return nil, err - } - - w.AddAccount(acc) - if err := w.SavePretty(); err != nil { - return nil, err - } - - return w, nil -} - -func openContractWallet(v *viper.Viper, cmd *cobra.Command, walletDir string) (*wallet.Wallet, error) { - p := filepath.Join(walletDir, constants.ContractWalletFilename) - w, err := wallet.NewWalletFromFile(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, fmt.Errorf("can't open wallet: %w", err) - } - - cmd.Printf("Contract group wallet is missing, initialize at %s\n", p) - return InitializeContractWallet(v, walletDir) - } - - password, err := config.GetPassword(v, constants.ContractWalletPasswordKey) - if err != nil { - return nil, err - } - - for i := range w.Accounts { - if err := w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { - return nil, fmt.Errorf("can't unlock wallet: %w", err) - } - } - - return w, nil -} - -func getWallet(cmd *cobra.Command, v *viper.Viper, needContracts bool, walletDir string) (*wallet.Wallet, error) { - if !needContracts { - return nil, nil - } - return openContractWallet(v, cmd, walletDir) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go deleted file mode 100644 index cdaf7d3bc..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go +++ /dev/null @@ -1,59 +0,0 @@ -package initialize - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func initializeSideChainCmd(cmd *cobra.Command, _ []string) error { - initCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("initialization error: %w", err) - } - defer initCtx.Close() - - // 1. Transfer funds to committee accounts. 
- cmd.Println("Stage 1: transfer GAS to alphabet nodes.") - if err := transferFunds(initCtx); err != nil { - return err - } - - cmd.Println("Stage 2: set notary and alphabet nodes in designate contract.") - if err := setNotaryAndAlphabetNodes(initCtx); err != nil { - return err - } - - // 3. Deploy NNS contract. - cmd.Println("Stage 3: deploy NNS contract.") - if err := helper.DeployNNS(initCtx, constants.DeployMethodName); err != nil { - return err - } - - // 4. Deploy NeoFS contracts. - cmd.Println("Stage 4: deploy NeoFS contracts.") - if err := deployContracts(initCtx); err != nil { - return err - } - - cmd.Println("Stage 4.1: Transfer GAS to proxy contract.") - if err := transferGASToProxy(initCtx); err != nil { - return err - } - - cmd.Println("Stage 5: register candidates.") - if err := registerCandidates(initCtx); err != nil { - return err - } - - cmd.Println("Stage 6: transfer NEO to alphabet contracts.") - if err := transferNEOToAlphabetContracts(initCtx); err != nil { - return err - } - - cmd.Println("Stage 7: set addresses in NNS.") - return setNNS(initCtx) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go deleted file mode 100644 index f40ea732c..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go +++ /dev/null @@ -1,80 +0,0 @@ -package initialize - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" -) - -func deployContracts(c *helper.InitializeContext) error { - alphaCs := c.GetContract(constants.AlphabetContract) - - var keysParam []any - - baseGroups := alphaCs.Manifest.Groups - - // alphabet contracts should be deployed by individual nodes to get different hashes. - for i, acc := range c.Accounts { - ctrHash := state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name) - if c.IsUpdated(ctrHash, alphaCs) { - c.Command.Printf("Alphabet contract #%d is already deployed.\n", i) - continue - } - - alphaCs.Manifest.Groups = baseGroups - err := helper.AddManifestGroup(c.ContractWallet, ctrHash, alphaCs) - if err != nil { - return fmt.Errorf("can't sign manifest group: %v", err) - } - - keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes()) - params := helper.GetContractDeployParameters(alphaCs, c.GetAlphabetDeployItems(i, len(c.Wallets))) - - act, err := actor.NewSimple(c.Client, acc) - if err != nil { - return fmt.Errorf("could not create actor: %w", err) - } - - txHash, vub, err := act.SendCall(management.Hash, constants.DeployMethodName, params...) 
- if err != nil { - return fmt.Errorf("can't deploy alphabet #%d contract: %w", i, err) - } - - c.SentTxs = append(c.SentTxs, helper.HashVUBPair{Hash: txHash, Vub: vub}) - } - - for _, ctrName := range constants.ContractList { - cs := c.GetContract(ctrName) - - ctrHash := cs.Hash - if c.IsUpdated(ctrHash, cs) { - c.Command.Printf("%s contract is already deployed.\n", ctrName) - continue - } - - err := helper.AddManifestGroup(c.ContractWallet, ctrHash, cs) - if err != nil { - return fmt.Errorf("can't sign manifest group: %v", err) - } - - args, err := helper.GetContractDeployData(c, ctrName, keysParam, constants.DeployMethodName) - if err != nil { - return fmt.Errorf("%s: getting deploy params: %v", ctrName, err) - } - params := helper.GetContractDeployParameters(cs, args) - res, err := c.CommitteeAct.MakeCall(management.Hash, constants.DeployMethodName, params...) - if err != nil { - return fmt.Errorf("can't deploy %s contract: %w", ctrName, err) - } - - if err := c.SendCommitteeTx(res.Script, false); err != nil { - return err - } - } - - return c.AwaitTx() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go deleted file mode 100644 index 176356378..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ /dev/null @@ -1,134 +0,0 @@ -package initialize - -import ( - "encoding/hex" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func setNNS(c *helper.InitializeContext) error { - r := management.NewReader(c.ReadOnlyInvoker) - nnsCs, err := helper.GetContractByID(r, 1) - if err != nil { - return err - } - - ok, err := c.NNSRootRegistered(nnsCs.Hash, "frostfs") - if err != nil { - return err - } else if !ok { - bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All, - "frostfs", c.CommitteeAcc.Contract.ScriptHash(), - constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal, - int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - if err := c.SendCommitteeTx(bw.Bytes(), true); err != nil { - return fmt.Errorf("can't add domain root to NNS: %w", err) - } - if err := c.AwaitTx(); err != nil { - return err - } - } - - alphaCs := c.GetContract(constants.AlphabetContract) - for i, acc := range c.Accounts { - alphaCs.Hash = state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name) - - domain := helper.GetAlphabetNNSDomain(i) - if err := nnsRegisterDomain(c, nnsCs.Hash, alphaCs.Hash, domain); err != nil { - return err - } - c.Command.Printf("NNS: Set %s -> %s\n", domain, alphaCs.Hash.StringLE()) - } - - for _, ctrName := range 
constants.ContractList { - cs := c.GetContract(ctrName) - - domain := ctrName + ".frostfs" - if err := nnsRegisterDomain(c, nnsCs.Hash, cs.Hash, domain); err != nil { - return err - } - c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE()) - } - - groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey() - err = updateNNSGroup(c, nnsCs.Hash, groupKey) - if err != nil { - return err - } - c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes())) - - return c.AwaitTx() -} - -func updateNNSGroup(c *helper.InitializeContext, nnsHash util.Uint160, pub *keys.PublicKey) error { - bw := io.NewBufBinWriter() - keyAlreadyAdded, domainRegCodeEmitted, err := c.EmitUpdateNNSGroupScript(bw, nnsHash, pub) - if keyAlreadyAdded || err != nil { - return err - } - - script := bw.Bytes() - if domainRegCodeEmitted { - w := io.NewBufBinWriter() - emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1}) - wrapRegisterScriptWithPrice(w, nnsHash, script) - script = w.Bytes() - } - - return c.SendCommitteeTx(script, true) -} - -// wrapRegisterScriptWithPrice wraps a given script with `getPrice`/`setPrice` calls for NNS. -// It is intended to be used for a single transaction, and not as a part of other scripts. -// It is assumed that script already contains static slot initialization code, the first one -// (with index 0) is used to store the price. -func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []byte) { - if len(s) == 0 { - return - } - - emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All) - emit.Opcodes(w.BinWriter, opcode.STSFLD0) - emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1) - - w.WriteBytes(s) - - emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) - emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - - assert.NoError(w.Err, "can't wrap register script") -} - -func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { - script, ok, err := c.NNSRegisterDomainScript(nnsHash, expectedHash, domain) - if ok || err != nil { - return err - } - - w := io.NewBufBinWriter() - emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1}) - wrapRegisterScriptWithPrice(w, nnsHash, script) - - emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT)) - emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All, - domain, int64(nns.TXT), expectedHash.StringLE()) - emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All, - domain, int64(nns.TXT), address.Uint160ToString(expectedHash)) - return c.SendCommitteeTx(w.Bytes(), true) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go deleted file mode 100644 index 7b7597d91..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ /dev/null @@ -1,142 +0,0 @@ -package initialize - -import ( - "fmt" - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "github.com/nspcc-dev/neo-go/pkg/core/native" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/io" - 
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -// initialAlphabetNEOAmount represents the total amount of GAS distributed between alphabet nodes. -const ( - initialAlphabetNEOAmount = native.NEOTotalSupply - registerBatchSize = transaction.MaxAttributes - 1 -) - -func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - reader := neo.NewReader(c.ReadOnlyInvoker) - regPrice, err := reader.GetRegisterPrice() - if err != nil { - return fmt.Errorf("can't fetch registration price: %w", err) - } - - w := io.NewBufBinWriter() - emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, 1) - for _, acc := range c.Accounts[start:end] { - emit.AppCall(w.BinWriter, neo.Hash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes()) - emit.Opcodes(w.BinWriter, opcode.ASSERT) - } - emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - assert.NoError(w.Err) - - signers := []actor.SignerAccount{{ - Signer: c.GetSigner(false, c.CommitteeAcc), - Account: c.CommitteeAcc, - }} - for _, acc := range c.Accounts[start:end] { - signers = append(signers, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{neo.Hash}, - }, - Account: acc, - }) - } - - act, err := actor.New(c.Client, signers) - if err != nil { - return fmt.Errorf("can't create actor: %w", err) - } - tx, err := act.MakeRun(w.Bytes()) - if err != nil { - return fmt.Errorf("can't create tx: %w", err) - } - if err := c.MultiSign(tx, constants.CommitteeAccountName); err != nil { - return fmt.Errorf("can't sign a transaction: %w", err) - } - - network := c.CommitteeAct.GetNetwork() - for _, acc := range c.Accounts[start:end] { - if err := acc.SignTx(network, tx); err != nil { - return fmt.Errorf("can't sign a transaction: %w", err) - } - } - - return c.SendTx(tx, c.Command, true) -} - -func registerCandidates(c *helper.InitializeContext) error { - cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neo.Hash, "getCandidates")) - if err != nil { - return fmt.Errorf("`getCandidates`: %w", err) - } - - need := len(c.Accounts) - have := len(cc) - - if need == have { - c.Command.Println("Candidates are already registered.") - return nil - } - - // Register candidates in batches in order to overcome the signers amount limit. - // See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27 - for i := 0; i < need; i += registerBatchSize { - start, end := i, min(i+registerBatchSize, need) - // This check is sound because transactions are accepted/rejected atomically. 
- if have >= end { - continue - } - if err := registerCandidateRange(c, start, end); err != nil { - return fmt.Errorf("registering candidates %d..%d: %q", start, end-1, err) - } - } - - return nil -} - -func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { - neoHash := neo.Hash - - ok, err := transferNEOFinished(c) - if ok || err != nil { - return err - } - - cs := c.GetContract(constants.AlphabetContract) - amount := initialAlphabetNEOAmount / len(c.Wallets) - - bw := io.NewBufBinWriter() - for _, acc := range c.Accounts { - h := state.CreateContractHash(acc.Contract.ScriptHash(), cs.NEF.Checksum, cs.Manifest.Name) - emit.AppCall(bw.BinWriter, neoHash, "transfer", callflag.All, - c.CommitteeAcc.Contract.ScriptHash(), h, int64(amount), nil) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) - } - - if err := c.SendCommitteeTx(bw.Bytes(), false); err != nil { - return err - } - - return c.AwaitTx() -} - -func transferNEOFinished(c *helper.InitializeContext) (bool, error) { - r := neo.NewReader(c.ReadOnlyInvoker) - bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) - return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go deleted file mode 100644 index 05bc83a8b..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go +++ /dev/null @@ -1,52 +0,0 @@ -package initialize - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" -) - -func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error { - if ok, err := setRolesFinished(c); ok || err != nil { - if err == nil { - c.Command.Println("Stage 2: already performed.") - } - return err - } - - var pubs []any - for _, acc := range c.Accounts { - pubs = append(pubs, acc.PrivateKey().PublicKey().Bytes()) - } - - w := io.NewBufBinWriter() - emit.AppCall(w.BinWriter, rolemgmt.Hash, "designateAsRole", - callflag.States|callflag.AllowNotify, int64(noderoles.P2PNotary), pubs) - emit.AppCall(w.BinWriter, rolemgmt.Hash, "designateAsRole", - callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs) - - if err := c.SendCommitteeTx(w.Bytes(), false); err != nil { - return fmt.Errorf("send committee transaction: %w", err) - } - - err := c.AwaitTx() - if err != nil { - err = fmt.Errorf("await committee transaction: %w", err) - } - return err -} - -func setRolesFinished(c *helper.InitializeContext) (bool, error) { - height, err := c.Client.GetBlockCount() - if err != nil { - return false, err - } - - pubs, err := helper.GetDesignatedByRole(c.ReadOnlyInvoker, rolemgmt.Hash, noderoles.NeoFSAlphabet, height) - return len(pubs) == len(c.Wallets), err -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go deleted file mode 100644 index 9bc51c055..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package initialize - -import ( - "encoding/hex" - "fmt" - "os" - "path/filepath" - "strconv" - "testing" - "time" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - cmdConfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/generate" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/node" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - "github.com/nspcc-dev/neo-go/pkg/config" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" -) - -const ( - contractsPath = "../../../../../../contract/frostfs-contract-v0.18.0.tar.gz" - protoFileName = "proto.yml" -) - -func TestInitialize(t *testing.T) { - // This test needs frostfs-contract tarball, so it is skipped by default. - // It is here for performing local testing after the changes. - t.Skip() - - t.Run("1 nodes", func(t *testing.T) { - testInitialize(t, 1) - }) - t.Run("4 nodes", func(t *testing.T) { - testInitialize(t, 4) - }) - t.Run("7 nodes", func(t *testing.T) { - testInitialize(t, 7) - }) - t.Run("16 nodes", func(t *testing.T) { - testInitialize(t, 16) - }) - t.Run("max nodes", func(t *testing.T) { - testInitialize(t, constants.MaxAlphabetNodes) - }) - t.Run("too many nodes", func(t *testing.T) { - require.ErrorIs(t, generateTestData(t.TempDir(), constants.MaxAlphabetNodes+1), helper.ErrTooManyAlphabetNodes) - }) -} - -func testInitialize(t *testing.T, committeeSize int) { - testdataDir := t.TempDir() - v := viper.GetViper() - - require.NoError(t, generateTestData(testdataDir, committeeSize)) - v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName)) - - // Set to the path or remove the next statement to download from the network. 
- require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath)) - - dumpPath := filepath.Join(testdataDir, "out") - require.NoError(t, Cmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath)) - v.Set(commonflags.AlphabetWalletsFlag, testdataDir) - v.Set(commonflags.EpochDurationInitFlag, 1) - v.Set(commonflags.MaxObjectSizeInitFlag, 1024) - - setTestCredentials(v, committeeSize) - require.NoError(t, initializeSideChainCmd(Cmd, nil)) - - t.Run("force-new-epoch", func(t *testing.T) { - require.NoError(t, netmap.ForceNewEpoch.Flags().Set(commonflags.LocalDumpFlag, dumpPath)) - require.NoError(t, netmap.ForceNewEpochCmd(netmap.ForceNewEpoch, nil)) - }) - t.Run("set-config", func(t *testing.T) { - require.NoError(t, cmdConfig.SetCmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath)) - require.NoError(t, cmdConfig.SetConfigCmd(cmdConfig.SetCmd, []string{"MaintenanceModeAllowed=true"})) - }) - t.Run("set-policy", func(t *testing.T) { - require.NoError(t, policy.Set.Flags().Set(commonflags.LocalDumpFlag, dumpPath)) - require.NoError(t, policy.SetPolicyCmd(policy.Set, []string{"ExecFeeFactor=1"})) - }) - t.Run("remove-node", func(t *testing.T) { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - pub := hex.EncodeToString(pk.PublicKey().Bytes()) - require.NoError(t, node.RemoveCmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath)) - require.NoError(t, node.RemoveNodesCmd(node.RemoveCmd, []string{pub})) - }) -} - -func generateTestData(dir string, size int) error { - v := viper.GetViper() - v.Set(commonflags.AlphabetWalletsFlag, dir) - - sizeStr := strconv.FormatUint(uint64(size), 10) - if err := generate.GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, sizeStr); err != nil { - return err - } - - setTestCredentials(v, size) - if err := generate.AlphabetCreds(generate.GenerateAlphabetCmd, nil); err != nil { - return err - } - - var pubs []string - for i := range size { - p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json") - w, err := wallet.NewWalletFromFile(p) - if err != nil { - return fmt.Errorf("wallet doesn't exist: %w", err) - } - for _, acc := range w.Accounts { - if acc.Label == constants.SingleAccountName { - pub, ok := vm.ParseSignatureContract(acc.Contract.Script) - if !ok { - return fmt.Errorf("could not parse signature script for %s", acc.Address) - } - pubs = append(pubs, hex.EncodeToString(pub)) - continue - } - } - } - - cfg := config.Config{} - cfg.ProtocolConfiguration.Magic = 12345 - cfg.ProtocolConfiguration.ValidatorsCount = uint32(size) - cfg.ProtocolConfiguration.TimePerBlock = time.Second - cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by Glagolitic letters - cfg.ProtocolConfiguration.P2PSigExtensions = true - cfg.ProtocolConfiguration.VerifyTransactions = true - data, err := yaml.Marshal(cfg) - if err != nil { - return err - } - - protoPath := filepath.Join(dir, protoFileName) - return os.WriteFile(protoPath, data, os.ModePerm) -} - -func setTestCredentials(v *viper.Viper, size int) { - for i := range size { - v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10)) - } - v.Set("credentials.contract", constants.TestContractPassword) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go deleted file mode 100644 index bb684b3a9..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ /dev/null @@ -1,179 +0,0 @@ -package initialize - -import ( - "fmt" - "math/big" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/pkg/core/native" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/wallet" -) - -const ( - // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. - initialAlphabetGASAmount = 10_000 * native.GASFactor - // initialProxyGASAmount represents the amount of GAS given to a proxy contract. - initialProxyGASAmount = 50_000 * native.GASFactor -) - -func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { - return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 -} - -func transferFunds(c *helper.InitializeContext) error { - ok, err := transferFundsFinished(c) - if ok || err != nil { - if err == nil { - c.Command.Println("Stage 1: already performed.") - } - return err - } - - version, err := c.Client.GetVersion() - if err != nil { - return err - } - - var transfers []transferTarget - for _, acc := range c.Accounts { - to := acc.Contract.ScriptHash() - transfers = append(transfers, - transferTarget{ - Token: gas.Hash, - Address: to, - Amount: initialAlphabetGASAmount, - }, - ) - } - - // It is convenient to have all funds at the committee account. - transfers = append(transfers, - transferTarget{ - Token: gas.Hash, - Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), - }, - transferTarget{ - Token: neo.Hash, - Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: native.NEOTotalSupply, - }, - ) - - tx, err := createNEP17MultiTransferTx(c.Client, c.ConsensusAcc, transfers) - if err != nil { - return fmt.Errorf("can't create transfer transaction: %w", err) - } - - if err := c.MultiSignAndSend(tx, constants.ConsensusAccountName); err != nil { - return fmt.Errorf("can't send transfer transaction: %w", err) - } - - return c.AwaitTx() -} - -// transferFundsFinished checks balances of accounts we transfer GAS to. -// The stage is considered finished if the balance is greater than half of what we need to transfer. 
-func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) - if err != nil { - return false, err - } - - version, err := c.Client.GetVersion() - if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { - return false, err - } - - res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) - if err != nil { - return false, err - } - - return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err -} - -func transferGASToProxy(c *helper.InitializeContext) error { - proxyCs := c.GetContract(constants.ProxyContract) - - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - bal, err := r.BalanceOf(proxyCs.Hash) - if err != nil || bal.Sign() > 0 { - return err - } - - tx, err := createNEP17MultiTransferTx(c.Client, c.CommitteeAcc, []transferTarget{{ - Token: gas.Hash, - Address: proxyCs.Hash, - Amount: initialProxyGASAmount, - }}) - if err != nil { - return err - } - - if err := c.MultiSignAndSend(tx, constants.CommitteeAccountName); err != nil { - return err - } - - return c.AwaitTx() -} - -type transferTarget struct { - Token util.Uint160 - Address util.Uint160 - Amount int64 - Data any -} - -func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients []transferTarget) (*transaction.Transaction, error) { - from := acc.Contract.ScriptHash() - - w := io.NewBufBinWriter() - for i := range recipients { - emit.AppCall(w.BinWriter, recipients[i].Token, "transfer", callflag.All, - from, recipients[i].Address, recipients[i].Amount, recipients[i].Data) - emit.Opcodes(w.BinWriter, opcode.ASSERT) - } - if w.Err != nil { - return nil, fmt.Errorf("failed to create transfer script: %w", w.Err) - } - - signers := []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }} - - act, err := actor.New(c, signers) - if err != nil { - return nil, fmt.Errorf("can't create actor: %w", err) - } - tx, err := act.MakeRun(w.Bytes()) - if err != nil { - sum := make(map[util.Uint160]int64) - for _, recipient := range recipients { - sum[recipient.Token] += recipient.Amount - } - detail := make([]string, 0, len(sum)) - for _, value := range sum { - detail = append(detail, fmt.Sprintf("amount=%v", value)) - } - err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err) - } - return tx, err -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go deleted file mode 100644 index 50f14e728..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go +++ /dev/null @@ -1,57 +0,0 @@ -package initialize - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - maxObjectSizeCLIFlag = "max-object-size" - epochDurationCLIFlag = "epoch-duration" - containerFeeCLIFlag = "container-fee" - containerAliasFeeCLIFlag = "container-alias-fee" - candidateFeeCLIFlag = "candidate-fee" - homomorphicHashDisabledCLIFlag = "homomorphic-disabled" - withdrawFeeCLIFlag = "withdraw-fee" -) - -var Cmd = &cobra.Command{ - Use: "init", - Short: "Initialize side chain network with smart-contracts and network settings", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = 
viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.EpochDurationInitFlag, cmd.Flags().Lookup(epochDurationCLIFlag)) - _ = viper.BindPFlag(commonflags.MaxObjectSizeInitFlag, cmd.Flags().Lookup(maxObjectSizeCLIFlag)) - _ = viper.BindPFlag(commonflags.MaxECDataCountFlag, cmd.Flags().Lookup(commonflags.MaxECDataCountFlag)) - _ = viper.BindPFlag(commonflags.MaxECParityCounFlag, cmd.Flags().Lookup(commonflags.MaxECParityCounFlag)) - _ = viper.BindPFlag(commonflags.HomomorphicHashDisabledInitFlag, cmd.Flags().Lookup(homomorphicHashDisabledCLIFlag)) - _ = viper.BindPFlag(commonflags.CandidateFeeInitFlag, cmd.Flags().Lookup(candidateFeeCLIFlag)) - _ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag)) - _ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag)) - _ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag)) - _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath)) - }, - RunE: initializeSideChainCmd, -} - -func initInitCmd() { - Cmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - Cmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - Cmd.Flags().String(commonflags.ContractsInitFlag, "", commonflags.ContractsInitFlagDesc) - Cmd.Flags().String(commonflags.ContractsURLFlag, "", commonflags.ContractsURLFlagDesc) - Cmd.Flags().Uint(epochDurationCLIFlag, 240, "Amount of side chain blocks in one FrostFS epoch") - Cmd.Flags().Uint(maxObjectSizeCLIFlag, 67108864, "Max single object size in bytes") - Cmd.Flags().Bool(homomorphicHashDisabledCLIFlag, false, "Disable object homomorphic hashing") - // Defaults are taken from neo-preodolenie. 
- Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee") - Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee") - Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration") - Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") - Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag) -} - -func init() { - initInitCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go deleted file mode 100644 index 94223dbd0..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go +++ /dev/null @@ -1,48 +0,0 @@ -package netmap - -import ( - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const deltaFlag = "delta" - -func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("can't initialize context: %w", err) - } - - r := management.NewReader(wCtx.ReadOnlyInvoker) - cs, err := helper.GetContractByID(r, 1) - if err != nil { - return fmt.Errorf("can't get NNS contract info: %w", err) - } - - nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract)) - if err != nil { - return fmt.Errorf("can't get netmap contract hash: %w", err) - } - - bw := io.NewBufBinWriter() - delta, _ := cmd.Flags().GetInt64(deltaFlag) - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil { - return err - } - - if err = wCtx.SendConsensusTx(bw.Bytes()); err == nil { - err = wCtx.AwaitTx() - } - if err != nil && strings.Contains(err.Error(), "invalid epoch") { - cmd.Println("Epoch has already ticked.") - return nil - } - return err -} diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go deleted file mode 100644 index a689e0ec1..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go +++ /dev/null @@ -1,33 +0,0 @@ -package netmap - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) { - c, err := helper.NewRemoteClient(viper.GetViper()) - commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) - - inv := invoker.New(c, nil) - r := management.NewReader(inv) - - cs, err := helper.GetContractByID(r, 1) - commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err) - - nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.NetmapContract)) - commonCmd.ExitOnErr(cmd, 
"can't get netmap contract hash: %w", err) - - res, err := inv.Call(nmHash, "netmapCandidates") - commonCmd.ExitOnErr(cmd, "can't fetch list of network config keys from the netmap contract", err) - nm, err := netmap.DecodeNetMap(res.Stack) - commonCmd.ExitOnErr(cmd, "unable to decode netmap: %w", err) - commonCmd.PrettyPrintNetMap(cmd, *nm, !viper.GetBool(commonflags.Verbose)) -} diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go deleted file mode 100644 index 291482e0f..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ /dev/null @@ -1,43 +0,0 @@ -package netmap - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - CandidatesCmd = &cobra.Command{ - Use: "netmap-candidates", - Short: "List netmap candidates nodes", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: listNetmapCandidatesNodes, - } - ForceNewEpoch = &cobra.Command{ - Use: "force-new-epoch", - Short: "Create new FrostFS epoch event in the side chain", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: ForceNewEpochCmd, - } -) - -func initNetmapCandidatesCmd() { - CandidatesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) -} - -func initForceNewEpochCmd() { - ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") - ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch") -} - -func init() { - initNetmapCandidatesCmd() - initForceNewEpochCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go deleted file mode 100644 index 14f6eb390..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ /dev/null @@ -1,93 +0,0 @@ -package nns - -import ( - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func initRegisterCmd() { - Cmd.AddCommand(registerCmd) - registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email") - registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter") - registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") - 
registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") - registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") - registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - - _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) -} - -func registerDomain(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - email, _ := cmd.Flags().GetString(nnsEmailFlag) - refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag) - retry, _ := cmd.Flags().GetInt64(nnsRetryFlag) - expire, _ := cmd.Flags().GetInt64(nnsExpireFlag) - ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag) - - h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh), - big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl)) - commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "register domain error: %w", err) - cmd.Println("Domain registered successfully") -} - -func initDeleteCmd() { - Cmd.AddCommand(deleteCmd) - deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - - _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) -} - -func deleteDomain(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - h, vub, err := c.DeleteDomain(name) - - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) - cmd.Println("Domain deleted successfully") -} - -func initSetAdminCmd() { - Cmd.AddCommand(setAdminCmd) - setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) - _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) - - _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) -} - -func setAdmin(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) - commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) - h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) - - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "Set admin error: %w", err) - cmd.Println("Set admin successfully") -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go deleted file mode 100644 index e49f62256..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ /dev/null @@ -1,67 +0,0 @@ -package nns - -import ( - "errors" - - client 
"git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { - v := viper.GetViper() - c, err := helper.NewRemoteClient(v) - commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - - alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) - walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) - adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) - - var ( - alphabet *helper.AlphabetWallets - regularWallets []*helper.RegularWallets - ) - - if alphabetWalletPath != "" { - alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} - } - - if walletPath != "" { - regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) - } - - if adminWalletPath != "" { - regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) - } - - if alphabet == nil && regularWallets == nil { - commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) - } - - ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) 
- commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) - - r := management.NewReader(ac.Invoker) - nnsCs, err := helper.GetContractByID(r, 1) - commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - return client.New(ac, nnsCs.Hash), ac -} - -func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { - c, err := helper.NewRemoteClient(viper.GetViper()) - commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - - inv := invoker.New(c, nil) - r := management.NewReader(inv) - nnsCs, err := helper.GetContractByID(r, 1) - commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - - return client.NewReader(inv, nnsCs.Hash), inv -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go deleted file mode 100644 index 9cb47356f..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ /dev/null @@ -1,178 +0,0 @@ -package nns - -import ( - "errors" - "math/big" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/spf13/cobra" -) - -func initAddRecordCmd() { - Cmd.AddCommand(addRecordCmd) - addRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - addRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) - addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) - addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - - _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) - _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) - _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordDataFlag) -} - -func initGetRecordsCmd() { - Cmd.AddCommand(getRecordsCmd) - getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) - - _ = cobra.MarkFlagRequired(getRecordsCmd.Flags(), nnsNameFlag) -} - -func initDelRecordsCmd() { - Cmd.AddCommand(delRecordsCmd) - delRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) - delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - - _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) - _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) -} - -func initDelRecordCmd() { - Cmd.AddCommand(delRecordCmd) - delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - 
delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) - delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) - delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - - _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) - _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) - _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag) -} - -func addRecord(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - name, _ := cmd.Flags().GetString(nnsNameFlag) - data, _ := cmd.Flags().GetString(nnsRecordDataFlag) - recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) - typ, err := getRecordType(recordType) - commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) - h, vub, err := c.AddRecord(name, typ, data) - commonCmd.ExitOnErr(cmd, "unable to add record: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "add record error: %w", err) - cmd.Println("Record added successfully") -} - -func getRecords(cmd *cobra.Command, _ []string) { - c, inv := nnsReader(cmd) - name, _ := cmd.Flags().GetString(nnsNameFlag) - recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) - if recordType == "" { - sid, r, err := c.GetAllRecords(name) - commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) - defer func() { - _ = inv.TerminateSession(sid) - }() - items, err := inv.TraverseIterator(sid, &r, 0) - commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) - for len(items) != 0 { - for j := range items { - rs := items[j].Value().([]stackitem.Item) - bs, err := rs[2].TryBytes() - commonCmd.ExitOnErr(cmd, "unable to parse record state: %w", err) - cmd.Printf("%s %s\n", - recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())), - string(bs)) - } - items, err = inv.TraverseIterator(sid, &r, 0) - commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) - } - } else { - typ, err := getRecordType(recordType) - commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) - items, err := c.GetRecords(name, typ) - commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) - for _, item := range items { - record, err := item.TryBytes() - commonCmd.ExitOnErr(cmd, "unable to parse response: %w", err) - cmd.Println(string(record)) - } - } -} - -func delRecords(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - name, _ := cmd.Flags().GetString(nnsNameFlag) - recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) - typ, err := getRecordType(recordType) - commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) - h, vub, err := c.DeleteRecords(name, typ) - commonCmd.ExitOnErr(cmd, "unable to delete records: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "delete records error: %w", err) - cmd.Println("Records removed successfully") -} - -func delRecord(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - name, _ := cmd.Flags().GetString(nnsNameFlag) - data, _ := cmd.Flags().GetString(nnsRecordDataFlag) - recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) - typ, err := getRecordType(recordType) - commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) - h, vub, err := c.DeleteRecord(name, typ, data) - commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err) - - 
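
Every write handler in this file follows the same send-then-await idiom: the contract client returns the transaction hash h together with its ValidUntilBlock vub, and Wait polls until the transaction persists or the chain height passes vub, so waiting is always bounded. A minimal sketch of the pattern, assuming act is a ready neo-go *actor.Actor and nnsHash is the resolved NNS contract hash; the package name, domain, and payload are illustrative:

package nnsutil

import (
	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// awaitCall sends one contract call and blocks until the transaction
// persists or its ValidUntilBlock passes, whichever happens first.
func awaitCall(act *actor.Actor, nnsHash util.Uint160) error {
	h, vub, err := act.SendCall(nnsHash, "addRecord",
		"example.frostfs", int64(nns.TXT), "payload")
	if err != nil {
		return err
	}
	_, err = act.Wait(h, vub, nil) // returns an error if vub is reached first
	return err
}
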
cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "delete records error: %w", err) - cmd.Println("Record removed successfully") -} - -func getRecordType(recordType string) (*big.Int, error) { - switch strings.ToUpper(recordType) { - case "A": - return big.NewInt(int64(nns.A)), nil - case "CNAME": - return big.NewInt(int64(nns.CNAME)), nil - case "SOA": - return big.NewInt(int64(nns.SOA)), nil - case "TXT": - return big.NewInt(int64(nns.TXT)), nil - case "AAAA": - return big.NewInt(int64(nns.AAAA)), nil - } - return nil, errors.New("unsupported record type") -} - -func recordTypeToString(rt nns.RecordType) string { - switch rt { - case nns.A: - return "A" - case nns.CNAME: - return "CNAME" - case nns.SOA: - return "SOA" - case nns.TXT: - return "TXT" - case nns.AAAA: - return "AAAA" - } - return "" -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go deleted file mode 100644 index 53bd943f0..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go +++ /dev/null @@ -1,26 +0,0 @@ -package nns - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -func initRenewCmd() { - Cmd.AddCommand(renewCmd) - renewCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - renewCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - renewCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) -} - -func renewDomain(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - name, _ := cmd.Flags().GetString(nnsNameFlag) - h, vub, err := c.Renew(name) - commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "renew domain error: %w", err) - cmd.Println("Domain renewed successfully") -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go deleted file mode 100644 index bb84933c6..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ /dev/null @@ -1,136 +0,0 @@ -package nns - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - nnsNameFlag = "name" - nnsNameFlagDesc = "Domain name" - nnsEmailFlag = "email" - nnsRefreshFlag = "refresh" - nnsRetryFlag = "retry" - nnsExpireFlag = "expire" - nnsTTLFlag = "ttl" - nnsRecordTypeFlag = "type" - nnsRecordTypeFlagDesc = "Domain name service record type(A|CNAME|SOA|TXT)" - nnsRecordDataFlag = "data" - nnsRecordDataFlagDesc = "Domain name service record data" -) - -var ( - Cmd = &cobra.Command{ - Use: "nns", - Short: "Section for Neo Name Service (NNS)", - } - tokensCmd = &cobra.Command{ - Use: "tokens", - Short: "List all registered domain names", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: listTokens, - } - registerCmd = &cobra.Command{ - Use: "register", - Short: "Registers a new domain", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = 
viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - }, - Run: registerDomain, - } - deleteCmd = &cobra.Command{ - Use: "delete", - Short: "Delete a domain by name", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - }, - Run: deleteDomain, - } - renewCmd = &cobra.Command{ - Use: "renew", - Short: "Increases domain expiration date", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - }, - Run: renewDomain, - } - updateCmd = &cobra.Command{ - Use: "update", - Short: "Updates soa record", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - }, - Run: updateSOA, - } - addRecordCmd = &cobra.Command{ - Use: "add-record", - Short: "Adds a new record of the specified type to the provided domain", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - }, - Run: addRecord, - } - getRecordsCmd = &cobra.Command{ - Use: "get-records", - Short: "Returns domain record of the specified type", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: getRecords, - } - delRecordsCmd = &cobra.Command{ - Use: "delete-records", - Short: "Removes domain records with the specified type", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - }, - Run: delRecords, - } - delRecordCmd = &cobra.Command{ - Use: "delete-record", - Short: "Removes domain record with the specified type and data", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - }, - Run: delRecord, - } - setAdminCmd = &cobra.Command{ - Use: "set-admin", - Short: "Sets admin for domain", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, 
cmd.Flags().Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) - }, - Run: setAdmin, - } -) - -func init() { - initTokensCmd() - initRegisterCmd() - initDeleteCmd() - initRenewCmd() - initUpdateCmd() - initAddRecordCmd() - initGetRecordsCmd() - initDelRecordsCmd() - initDelRecordCmd() - initSetAdminCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go deleted file mode 100644 index 4ccbb1677..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go +++ /dev/null @@ -1,65 +0,0 @@ -package nns - -import ( - "math/big" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -const ( - verboseDesc = "Include additional information about CNAME record." -) - -func initTokensCmd() { - Cmd.AddCommand(tokensCmd) - tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc) -} - -func listTokens(cmd *cobra.Command, _ []string) { - c, _ := nnsReader(cmd) - it, err := c.Tokens() - commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err) - for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) { - for _, token := range toks { - output := string(token) - if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose { - cname, err := getCnameRecord(c, token) - commonCmd.ExitOnErr(cmd, "", err) - if cname != "" { - output += " (CNAME: " + cname + ")" - } - } - cmd.Println(output) - } - } -} - -func getCnameRecord(c *client.ContractReader, token []byte) (string, error) { - items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME))) - - // GetRecords returns the error "not an array" if the domain does not contain records. 
- if err != nil && strings.Contains(err.Error(), "not an array") { - return "", nil - } - - if err != nil { - return "", err - } - - if len(items) == 0 { - return "", nil - } - - record, err := items[0].TryBytes() - if err != nil { - return "", err - } - - return string(record), nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go deleted file mode 100644 index c6d77ead6..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go +++ /dev/null @@ -1,50 +0,0 @@ -package nns - -import ( - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -func initUpdateCmd() { - Cmd.AddCommand(updateCmd) - updateCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - updateCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - updateCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - updateCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email") - updateCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, - "The number of seconds between update requests from secondary and slave name servers") - updateCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, - "The number of seconds the secondary or slave will wait before retrying when the last attempt has failed") - updateCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), - "The number of seconds a master or slave will wait before considering the data stale "+ - "if it cannot reach the primary name server") - updateCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, - "The number of seconds a domain name is cached locally before expiration and return to authoritative "+ - "nameservers for updated information") - - _ = cobra.MarkFlagRequired(updateCmd.Flags(), nnsNameFlag) -} - -func updateSOA(cmd *cobra.Command, _ []string) { - c, actor := nnsWriter(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - email, _ := cmd.Flags().GetString(nnsEmailFlag) - refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag) - retry, _ := cmd.Flags().GetInt64(nnsRetryFlag) - expire, _ := cmd.Flags().GetInt64(nnsExpireFlag) - ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag) - - h, vub, err := c.UpdateSOA(name, email, big.NewInt(refresh), - big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl)) - commonCmd.ExitOnErr(cmd, "unable to send transaction: %w", err) - - cmd.Println("Waiting for transaction to persist...") - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "register domain error: %w", err) - cmd.Println("SOA records updated successfully") -} diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go deleted file mode 100644 index e47451e0c..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go +++ /dev/null @@ -1,65 +0,0 @@ -package node - -import ( - "errors" - "fmt" - - netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - 
"github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func RemoveNodesCmd(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("at least one node key must be provided") - } - - nodeKeys := make(keys.PublicKeys, len(args)) - for i := range args { - var err error - nodeKeys[i], err = keys.NewPublicKeyFromString(args[i]) - if err != nil { - return fmt.Errorf("can't parse node public key: %w", err) - } - } - - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("can't initialize context: %w", err) - } - defer wCtx.Close() - - r := management.NewReader(wCtx.ReadOnlyInvoker) - cs, err := helper.GetContractByID(r, 1) - if err != nil { - return fmt.Errorf("can't get NNS contract info: %w", err) - } - - nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract)) - if err != nil { - return fmt.Errorf("can't get netmap contract hash: %w", err) - } - - bw := io.NewBufBinWriter() - for i := range nodeKeys { - emit.AppCall(bw.BinWriter, nmHash, "updateStateIR", callflag.All, - int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes()) - } - - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil { - return err - } - - if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { - return err - } - - return wCtx.AwaitTx() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/node/root.go b/cmd/frostfs-adm/internal/modules/morph/node/root.go deleted file mode 100644 index 1c38ae8bc..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/node/root.go +++ /dev/null @@ -1,28 +0,0 @@ -package node - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var RemoveCmd = &cobra.Command{ - Use: "remove-nodes key1 [key2 [...]]", - Short: "Remove storage nodes from the netmap", - Long: `Move nodes to the Offline state in the candidates list and tick an epoch to update the netmap`, - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: RemoveNodesCmd, -} - -func initRemoveNodesCmd() { - RemoveCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - RemoveCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveCmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") -} - -func init() { - initRemoveNodesCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go deleted file mode 100644 index 3435926c0..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ /dev/null @@ -1,138 +0,0 @@ -package notary - -import ( - "errors" - "fmt" - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - 
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - // defaultNotaryDepositLifetime is an amount of blocks notary deposit stays valid. - // https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/native/notary.go#L48 - defaultNotaryDepositLifetime = 5760 - - walletAccountFlag = "account" - notaryDepositTillFlag = "till" -) - -var errInvalidNotaryDepositLifetime = errors.New("notary deposit lifetime must be a positive integer") - -func depositNotary(cmd *cobra.Command, _ []string) error { - w, err := openWallet(cmd) - if err != nil { - return err - } - - accHash := w.GetChangeAddress() - addr, _ := cmd.Flags().GetString(walletAccountFlag) - if addr != "" { - accHash, err = address.StringToUint160(addr) - if err != nil { - return fmt.Errorf("invalid address: %s", addr) - } - } - - acc := w.GetAccount(accHash) - if acc == nil { - return fmt.Errorf("can't find account for %s", accHash) - } - - prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash)) - pass, err := input.ReadPassword(prompt) - if err != nil { - return fmt.Errorf("can't get password: %v", err) - } - - err = acc.Decrypt(pass, keys.NEP2ScryptParams()) - if err != nil { - return fmt.Errorf("can't unlock account: %v", err) - } - - gasStr, err := cmd.Flags().GetString(commonflags.RefillGasAmountFlag) - if err != nil { - return err - } - gasAmount, err := helper.ParseGASAmount(gasStr) - if err != nil { - return err - } - - till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag) - if till <= 0 { - return errInvalidNotaryDepositLifetime - } - - return transferGas(cmd, acc, accHash, gasAmount, till) -} - -func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error { - c, err := helper.NewRemoteClient(viper.GetViper()) - if err != nil { - return err - } - - if err := helper.CheckNotaryEnabled(c); err != nil { - return err - } - - height, err := c.GetBlockCount() - if err != nil { - return fmt.Errorf("can't get current height: %v", err) - } - - act, err := actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: acc, - }}) - if err != nil { - return fmt.Errorf("could not create actor: %w", err) - } - - gasActor := nep17.New(act, gas.Hash) - - txHash, vub, err := gasActor.Transfer( - accHash, - notary.Hash, - big.NewInt(int64(gasAmount)), - []any{nil, int64(height) + till}, - ) - if err != nil { - return fmt.Errorf("could not send tx: %w", err) - } - - return helper.AwaitTx(cmd, c, []helper.HashVUBPair{{Hash: txHash, Vub: vub}}) -} - -func openWallet(cmd *cobra.Command) (*wallet.Wallet, error) { - p, err := cmd.Flags().GetString(commonflags.StorageWalletFlag) - if err != nil { - return nil, err - } else if p == "" { - return nil, fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) - } - - w, err := wallet.NewWalletFromFile(p) - if err != nil { - return nil, fmt.Errorf("can't open wallet: %v", err) - } - return w, nil -} diff --git 
a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go deleted file mode 100644 index d7be2e503..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go +++ /dev/null @@ -1,28 +0,0 @@ -package notary - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var DepositCmd = &cobra.Command{ - Use: "deposit-notary", - Short: "Deposit GAS for notary service", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: depositNotary, -} - -func initDepositoryNotaryCmd() { - DepositCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") - DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address") - DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit") - DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks") -} - -func init() { - initDepositoryNotaryCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go deleted file mode 100644 index f2932e87c..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ /dev/null @@ -1,91 +0,0 @@ -package policy - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "text/tabwriter" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/policy" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - execFeeParam = "ExecFeeFactor" - storagePriceParam = "StoragePrice" - setFeeParam = "FeePerByte" -) - -var errInvalidParameterFormat = errors.New("invalid parameter format, must be Parameter=Value") - -func SetPolicyCmd(cmd *cobra.Command, args []string) error { - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("can't initialize context: %w", err) - } - - bw := io.NewBufBinWriter() - for i := range args { - k, v, found := strings.Cut(args[i], "=") - if !found { - return errInvalidParameterFormat - } - - switch k { - case execFeeParam, storagePriceParam, setFeeParam: - default: - return fmt.Errorf("parameter must be one of %s, %s and %s", execFeeParam, storagePriceParam, setFeeParam) - } - - value, err := strconv.ParseUint(v, 10, 32) - if err != nil { - return fmt.Errorf("can't parse parameter value '%s': %w", args[i], err) - } - - emit.AppCall(bw.BinWriter, policy.Hash, "set"+k, callflag.All, int64(value)) - } - - if err := wCtx.SendCommitteeTx(bw.Bytes(), false); err != nil { - return err - } - - return wCtx.AwaitTx() -} - -func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { - c, err := helper.NewRemoteClient(viper.GetViper()) - commonCmd.ExitOnErr(cmd, "can't create N3 client:", err) - - inv := invoker.New(c, nil) - policyContract := policy.NewReader(inv) - - execFee, err := policyContract.GetExecFeeFactor() - 
commonCmd.ExitOnErr(cmd, "can't get execution fee factor:", err) - - feePerByte, err := policyContract.GetFeePerByte() - commonCmd.ExitOnErr(cmd, "can't get fee per byte:", err) - - storagePrice, err := policyContract.GetStoragePrice() - commonCmd.ExitOnErr(cmd, "can't get storage price:", err) - - buf := bytes.NewBuffer(nil) - tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - - _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) - _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) - _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice)) - - _ = tw.Flush() - cmd.Print(buf.String()) - - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/root.go b/cmd/frostfs-adm/internal/modules/morph/policy/root.go deleted file mode 100644 index a8a356207..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/policy/root.go +++ /dev/null @@ -1,47 +0,0 @@ -package policy - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - Set = &cobra.Command{ - Use: "set-policy [ExecFeeFactor=] [StoragePrice=] [FeePerByte=]", - DisableFlagsInUseLine: true, - Short: "Set global policy values", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: SetPolicyCmd, - ValidArgsFunction: func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"ExecFeeFactor=", "StoragePrice=", "FeePerByte="}, cobra.ShellCompDirectiveNoSpace - }, - } - - Dump = &cobra.Command{ - Use: "dump-policy", - Short: "Dump FrostFS policy", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - RunE: dumpPolicyCmd, - } -) - -func initSetPolicyCmd() { - Set.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - Set.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - Set.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") -} - -func initDumpPolicyCmd() { - Dump.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) -} - -func init() { - initSetPolicyCmd() - initDumpPolicyCmd() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go deleted file mode 100644 index 24cda45a6..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go +++ /dev/null @@ -1,81 +0,0 @@ -package proxy - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - accountAddressFlag = "account" -) - -func 
parseAddresses(cmd *cobra.Command) []util.Uint160 { - var addrs []util.Uint160 - - accs, _ := cmd.Flags().GetStringArray(accountAddressFlag) - for _, acc := range accs { - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - - addrs = append(addrs, addr) - } - return addrs -} - -func addProxyAccount(cmd *cobra.Command, _ []string) { - addrs := parseAddresses(cmd) - err := processAccount(cmd, addrs, "addAccount") - commonCmd.ExitOnErr(cmd, "processing error: %w", err) -} - -func removeProxyAccount(cmd *cobra.Command, _ []string) { - addrs := parseAddresses(cmd) - err := processAccount(cmd, addrs, "removeAccount") - commonCmd.ExitOnErr(cmd, "processing error: %w", err) -} - -func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error { - wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) - if err != nil { - return fmt.Errorf("can't initialize context: %w", err) - } - - r := management.NewReader(wCtx.ReadOnlyInvoker) - cs, err := helper.GetContractByID(r, 1) - if err != nil { - return fmt.Errorf("can't get NNS contract info: %w", err) - } - - proxyHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.ProxyContract)) - if err != nil { - return fmt.Errorf("can't get proxy contract hash: %w", err) - } - - bw := io.NewBufBinWriter() - for _, addr := range addrs { - emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) - } - - if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { - return err - } - - if err = wCtx.AwaitTx(); err != nil { - return err - } - - cmd.Println("Proxy contract has been updated") - - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go deleted file mode 100644 index ad89af2b5..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ /dev/null @@ -1,47 +0,0 @@ -package proxy - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - AddAccountCmd = &cobra.Command{ - Use: "proxy-add-account", - Short: "Adds account to proxy contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: addProxyAccount, - } - RemoveAccountCmd = &cobra.Command{ - Use: "proxy-remove-account", - Short: "Remove from proxy contract", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: removeProxyAccount, - } -) - -func initProxyAddAccount() { - AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") - _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) - AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func initProxyRemoveAccount() { - RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address 
string") - _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) - RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) -} - -func init() { - initProxyAddAccount() - initProxyRemoveAccount() -} diff --git a/cmd/frostfs-adm/internal/modules/morph/root.go b/cmd/frostfs-adm/internal/modules/morph/root.go deleted file mode 100644 index e8426d56e..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/root.go +++ /dev/null @@ -1,54 +0,0 @@ -package morph - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/balance" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/contract" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/generate" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/initialize" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/node" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/notary" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/proxy" - "github.com/spf13/cobra" -) - -// RootCmd is a root command of config section. 
-var RootCmd = &cobra.Command{ - Use: "morph", - Short: "Section for morph network configuration commands", -} - -func init() { - RootCmd.AddCommand(generate.RefillGasCmd) - RootCmd.AddCommand(initialize.Cmd) - RootCmd.AddCommand(contract.DeployCmd) - RootCmd.AddCommand(generate.GenerateStorageCmd) - RootCmd.AddCommand(netmap.ForceNewEpoch) - RootCmd.AddCommand(node.RemoveCmd) - RootCmd.AddCommand(policy.Set) - RootCmd.AddCommand(policy.Dump) - RootCmd.AddCommand(contract.DumpHashesCmd) - RootCmd.AddCommand(config.SetCmd) - RootCmd.AddCommand(config.DumpCmd) - RootCmd.AddCommand(balance.DumpCmd) - RootCmd.AddCommand(contract.UpdateCmd) - RootCmd.AddCommand(container.ListCmd) - RootCmd.AddCommand(container.RestoreCmd) - RootCmd.AddCommand(container.DumpCmd) - RootCmd.AddCommand(generate.GenerateAlphabetCmd) - RootCmd.AddCommand(notary.DepositCmd) - RootCmd.AddCommand(netmap.CandidatesCmd) - - RootCmd.AddCommand(ape.Cmd) - RootCmd.AddCommand(proxy.AddAccountCmd) - RootCmd.AddCommand(proxy.RemoveAccountCmd) - - RootCmd.AddCommand(frostfsid.Cmd) - RootCmd.AddCommand(nns.Cmd) -} diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go deleted file mode 100644 index cc8225c7a..000000000 --- a/cmd/frostfs-adm/internal/modules/root.go +++ /dev/null @@ -1,85 +0,0 @@ -package modules - -import ( - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" - utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var rootCmd = &cobra.Command{ - Use: "frostfs-adm", - Short: "FrostFS Administrative Tool", - Long: `FrostFS Administrative Tool provides functions to setup and -manage FrostFS network deployment.`, - RunE: entryPoint, - SilenceUsage: true, -} - -func init() { - cobra.OnInitialize(func() { initConfig(rootCmd) }) - // we need to init viper config to bind viper and cobra configurations for - // rpc endpoint, alphabet wallet dir, key credentials, etc. 
-
-	// use stdout as default output for cmd.Print()
-	rootCmd.SetOut(os.Stdout)
-
-	rootCmd.PersistentFlags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
-	rootCmd.PersistentFlags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
-	rootCmd.PersistentFlags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, commonflags.VerboseUsage)
-	_ = viper.BindPFlag(commonflags.Verbose, rootCmd.PersistentFlags().Lookup(commonflags.Verbose))
-	rootCmd.Flags().Bool("version", false, "Application version")
-
-	rootCmd.AddCommand(config.RootCmd)
-	rootCmd.AddCommand(morph.RootCmd)
-	rootCmd.AddCommand(metabase.RootCmd)
-	rootCmd.AddCommand(maintenance.RootCmd)
-
-	rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
-	rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
-}
-
-func Execute() error {
-	return rootCmd.Execute()
-}
-
-func entryPoint(cmd *cobra.Command, _ []string) error {
-	printVersion, _ := cmd.Flags().GetBool("version")
-	if printVersion {
-		cmd.Print(misc.BuildInfo("FrostFS Adm"))
-		return nil
-	}
-
-	return cmd.Usage()
-}
-
-func initConfig(cmd *cobra.Command) {
-	configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
-	if err != nil {
-		return
-	}
-
-	if configFile != "" {
-		viper.SetConfigType("yml")
-		viper.SetConfigFile(configFile)
-		_ = viper.ReadInConfig() // if config file is set but unavailable, ignore it
-	}
-
-	configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
-	if err != nil {
-		return
-	}
-
-	if configDir != "" {
-		_ = utilConfig.ReadConfigDir(viper.GetViper(), configDir) // if config files cannot be read, ignore it
-	}
-}
diff --git a/cmd/frostfs-adm/main.go b/cmd/frostfs-adm/main.go
deleted file mode 100644
index eb91e3ab9..000000000
--- a/cmd/frostfs-adm/main.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package main
-
-import (
-	"os"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules"
-)
-
-func main() {
-	if err := modules.Execute(); err != nil {
-		os.Exit(1)
-	}
-}
diff --git a/cmd/frostfs-cli/docs/policy.md b/cmd/frostfs-cli/docs/policy.md
deleted file mode 100644
index 1d51818ac..000000000
--- a/cmd/frostfs-cli/docs/policy.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# How to manage local Access Policy Engine (APE) overrides of the node
-
-## Overview
-APE is a replacement for eACL. Each rule can restrict access to an object, a container, or a list of them.
-Here is a simple representation of a rule:
-`<status>[:status_detail] <action>... <condition>... <resource>...`
-
-A rule starts with a `status` (with or without details), contains a list of actions (which the rule regulates) or conditions
-(which can apply to a resource or a request), and ends with a list of resources.
-
-A resource is a combination of a namespace, an identifier of the FrostFS container/object, and the wildcard `*`.
-
-For objects it can be represented as:
-- `namespace/cid/oid` object in the container of the namespace
-- `namespace/cid/*` all objects in the container of the namespace
-- `namespace/*` all objects in the namespace
-- `*` all objects
-- `/*` all objects in the `root` namespace
-- `/cid/*` all objects in the container of the `root` namespace
-- `/cid/oid` object in the container of the `root` namespace
-
-For containers it can be represented as:
-- `namespace/cid` container in the namespace
-- `namespace/*` all containers in the namespace
-- `*` all containers
-- `/cid` container in the `root` namespace
-- `/*` all containers in the `root` namespace
-
-Actions are regular operations on FrostFS containers/objects, such as `Object.Put`, `Container.Get`, etc.
-You can use `Object.*` or `Container.*`, which imply all actions.
-
-In the status section it is possible to use `allow`, `deny` or `deny:QuotaLimitReached`.
-
-If a statement does not contain the lexeme `any`, the field `Any` is set to `false` by default. Otherwise, it is set
-to `true`. Optionally, `all` can be used: it also sets `Any=false`.
-
-It is prohibited to mix container and object operations in one rule.
-The same holds for conditions and resources: one rule covers one type of item.
-
-## Add rule
-A local rule can be added with the command `frostfs-cli control add-rule`:
-```shell
-@:~$ frostfs-cli control add-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH \
---chain-id TestPolicy \
---rule "allow Object.Get Object.Head /*" --rule "deny Container.Put *"
-Parsed chain:
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
- Conditions:
- Actions: Inverted:false
-  GetObject
-  HeadObject
- Resources: Inverted:false
-  native:object//*
-
- Status: Access denied
- Any: false
- Conditions:
- Actions: Inverted:false
-  PutContainer
- Resources: Inverted:false
-  native:container/*
-
-Rule has been added.
-@:~$
-```
-## List rules
-Local rules can be listed with the command `frostfs-cli control list-rules`:
-```shell
-@:~$ frostfs-cli control list-rules --endpoint s04.frostfs.devenv:8081 --address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM \
---cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH -w wallets/wallet.json
-Enter password >
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
-...
-@:~$
-```
-
-## Get rule
-Rules can be retrieved with `frostfs-cli control get-rule`:
-```shell
-@:~$ frostfs-cli control get-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH \
---chain-id TestPolicy
-Parsed chain (chain id hex: '54657374506f6c696379'):
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
-...
-@:~$
-```
-
-## Remove rule
-To remove a rule, use the command `frostfs-cli control remove-rule`:
-```shell
-@:~$ frostfs-cli control remove-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH --chain-id TestPolicy
-Rule has been removed.
-@:~$ frostfs-cli control get-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH --chain-id TestPolicy
-rpc error: rpc error: code = NotFound desc = chain not found
-@:~$ frostfs-cli control list-rules --endpoint s04.frostfs.devenv:8081 \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH -w wallets/wallet.json
-Enter password >
-Local overrides are not defined for the container.
-@:~$
-```
diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md
deleted file mode 100644
index 52c0e9b9b..000000000
--- a/cmd/frostfs-cli/docs/sessions.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# How FrostFS CLI uses the session mechanism of FrostFS
-
-## Overview
-
-FrostFS sessions implement a mechanism for issuing a power of attorney by one
-party to another. A trusted party can provide a so-called session token as
-proof of the right to act on behalf of another member of the network. The
-client of operations carried out with such a token will be the user who opened
-the session. The token contains information which limits the power of attorney,
-such as the action context or lifetime.
-
-The client confirms trust in a third party by signing its public (session) key
-with their private key. Any operation signed with the private session key and
-an attached session token is treated as performed by the original client.
-
-## Types
-
-FrostFS CLI supports two ways to execute an operation within a session, depending on
-whether the user of the command application is the original user (1) or a trusted
-one (2).
-
-### Dynamic
-
-For case (1) the CLI user can only open dynamic sessions. The protocol call
-`SessionService.Create` is used for this purpose. As a result of the call, a
-private session key will be generated on the server, thus making the remote
-server trusted. This type of session is useful when the client needs to
-transfer part of the responsibility for the formation of strict system elements
-to the trusted server. At the moment, the approach is applicable only to
-creating objects.
-
-```shell
-$ frostfs-cli session create --rpc-endpoint <endpoint> --out ./blank_token
-```
-After this example command, the remote node holds the session private key, while its
-public part is written into the session token encoded into the output file.
-Later this token can be attached to the operations which support dynamic
-sessions, and the token will be finally formed and signed by the CLI itself.
-
-### Static
-
-For case (2) the CLI user can act on behalf of the person who issued the session
-token to them. Unlike (1), the token must be fully prepared on the side of the
-original client, and the CLI uses it only for reading. A ready token MUST have:
-- correct context (object, container, etc.)
-- valid lifetime
-- public session key corresponding to the CLI key
-- valid client signature
-
-To sign the session token, exec:
-```shell
-$ frostfs-cli --wallet <wallet> util sign session-token --from ./blank_token --to ./token
-```
-Once the token is signed, it MUST NOT be modified.
-
-## Commands
-
-### Object
-
-Here are the sub-commands of the `object` command which support only dynamic sessions (1):
-- `put`
-- `delete`
-- `lock`
-
-These commands accept a blank token of a dynamically opened session, or open a
-session internally if one has not been opened yet.
-
-All other `object` sub-commands support only static sessions (2).
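For reference, the dynamic flow above can also be driven programmatically through the internal client wrapper that this diff deletes from `cmd/frostfs-cli/internal/client/client.go`. A minimal sketch, assuming `ctx` and an established `*client.Client` are obtained the way the CLI obtains them (e.g. via `GetSDKClientByFlag`); this is an editorial illustration, not part of the deleted files, and session expiration setup is omitted:

```go
package main

import (
	"context"
	"fmt"

	// The deleted sources declare this directory as package "internal".
	internal "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)

// openDynamicSession mirrors case (1): the remote node generates and keeps
// the private session key; the caller only receives the session ID and the
// public session key, which are what end up in the blank token.
func openDynamicSession(ctx context.Context, cli *client.Client) (id, pubKey []byte, err error) {
	var prm internal.CreateSessionPrm
	prm.SetClient(cli)

	res, err := internal.CreateSession(ctx, prm)
	if err != nil {
		return nil, nil, fmt.Errorf("open dynamic session: %w", err)
	}
	return res.ID(), res.SessionKey(), nil
}
```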
-
-### Container
-
-List of commands supporting sessions (static only):
-- `create`
-- `delete`
diff --git a/cmd/frostfs-cli/docs/storage-node-xheaders.md b/cmd/frostfs-cli/docs/storage-node-xheaders.md
deleted file mode 100644
index f86b97ec5..000000000
--- a/cmd/frostfs-cli/docs/storage-node-xheaders.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Extended headers
-
-## Overview
-
-Extended headers are attached to requests and responses. They may contain any
-user-defined headers to be interpreted at the application level. The key name must be a
-unique valid UTF-8 string. The value can't be empty. Requests or responses with
-duplicated header names or headers with empty values are considered invalid.
-
-## Existing headers
-
-There are some "well-known" headers starting with the `__SYSTEM__` prefix that
-affect system behaviour. For backward compatibility, the same set of
-"well-known" headers may also use the `__NEOFS__` prefix:
-
-* `__SYSTEM__NETMAP_EPOCH` - netmap epoch to use for object placement calculation. The `value` is a
-string-encoded `uint64` in decimal representation. If set to '0' or omitted, only the
-current epoch will be used.
-* `__SYSTEM__NETMAP_LOOKUP_DEPTH` - if an object can't be found using the current epoch's netmap, this header limits
-how many past epochs the node can look up through. The depth is applied to the current epoch or the value
-of the `__SYSTEM__NETMAP_EPOCH` attribute. The `value` is a string-encoded `uint64` in decimal representation.
-If set to '0' or not set, only the current epoch is used.
-
-## `frostfs-cli` commands with `--xhdr`
-
-List of commands with support of extended headers:
-* `container list-objects`
-* `object delete/get/hash/head/lock/put/range/search`
-
-Example:
-```shell
-$ frostfs-cli object put -r s01.frostfs.devenv:8080 -w wallet.json --cid CID --file FILE --xhdr "__SYSTEM__NETMAP_EPOCH=777"
-```
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
deleted file mode 100644
index 299d0a830..000000000
--- a/cmd/frostfs-cli/internal/client/client.go
+++ /dev/null
@@ -1,918 +0,0 @@
-package internal
-
-import (
-	"bytes"
-	"cmp"
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"slices"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-)
-
-var errMissingHeaderInResponse = errors.New("missing header in response")
-
-// BalanceOfPrm groups parameters of BalanceOf operation.
-type BalanceOfPrm struct {
-	commonPrm
-	client.PrmBalanceGet
-}
-
-// BalanceOfRes groups the resulting values of BalanceOf operation.
-type BalanceOfRes struct {
-	cliRes *client.ResBalanceGet
-}
-
-// Balance returns the current balance.
-func (x BalanceOfRes) Balance() accounting.Decimal {
-	return x.cliRes.Amount()
-}
-
-// BalanceOf requests the current balance of a FrostFS user.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func BalanceOf(ctx context.Context, prm BalanceOfPrm) (res BalanceOfRes, err error) { - res.cliRes, err = prm.cli.BalanceGet(ctx, prm.PrmBalanceGet) - - return -} - -// ListContainersPrm groups parameters of ListContainers operation. -type ListContainersPrm struct { - commonPrm - client.PrmContainerList -} - -// ListContainersRes groups the resulting values of ListContainers operation. -type ListContainersRes struct { - cliRes *client.ResContainerList -} - -// IDList returns list of identifiers of user's containers. -func (x ListContainersRes) IDList() []cid.ID { - return x.cliRes.Containers() -} - -// ListContainers requests a list of FrostFS user's containers. -// -// Returns any error which prevented the operation from completing correctly in error return. -func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContainersRes, err error) { - res.cliRes, err = prm.cli.ContainerList(ctx, prm.PrmContainerList) - - return -} - -// SortedIDList returns sorted list of identifiers of user's containers. -func (x ListContainersRes) SortedIDList() []cid.ID { - list := x.cliRes.Containers() - slices.SortFunc(list, cid.ID.Cmp) - return list -} - -func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) { - cliPrm := &client.PrmContainerListStream{ - XHeaders: prm.XHeaders, - OwnerID: prm.OwnerID, - Session: prm.Session, - } - rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm) - if err != nil { - return fmt.Errorf("init container list: %w", err) - } - - err = rdr.Iterate(processCnr) - if err != nil { - return fmt.Errorf("read container list: %w", err) - } - - return -} - -// PutContainerPrm groups parameters of PutContainer operation. -type PutContainerPrm struct { - Client *client.Client - ClientParams client.PrmContainerPut -} - -// PutContainerRes groups the resulting values of PutContainer operation. -type PutContainerRes struct { - cnr cid.ID -} - -// ID returns identifier of the created container. -func (x PutContainerRes) ID() cid.ID { - return x.cnr -} - -// PutContainer sends a request to save the container in FrostFS. -// -// Operation is asynchronous and not guaranteed even in the absence of errors. -// The required time is also not predictable. -// -// Success can be verified by reading by identifier. -// -// Returns any error which prevented the operation from completing correctly in error return. -func PutContainer(ctx context.Context, prm PutContainerPrm) (res PutContainerRes, err error) { - cliRes, err := prm.Client.ContainerPut(ctx, prm.ClientParams) - if err == nil { - res.cnr = cliRes.ID() - } - - return -} - -// GetContainerPrm groups parameters of GetContainer operation. -type GetContainerPrm struct { - Client *client.Client - ClientParams client.PrmContainerGet -} - -// SetContainer sets identifier of the container to be read. -// -// Deprecated: Use GetContainerPrm.ClientParams.ContainerID instead. -func (x *GetContainerPrm) SetContainer(id cid.ID) { - x.ClientParams.ContainerID = &id -} - -// GetContainerRes groups the resulting values of GetContainer operation. -type GetContainerRes struct { - cliRes *client.ResContainerGet -} - -// Container returns structured of the requested container. -func (x GetContainerRes) Container() containerSDK.Container { - return x.cliRes.Container() -} - -// GetContainer reads a container from FrostFS by ID. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func GetContainer(ctx context.Context, prm GetContainerPrm) (res GetContainerRes, err error) { - res.cliRes, err = prm.Client.ContainerGet(ctx, prm.ClientParams) - - return -} - -// IsACLExtendable checks if ACL of the container referenced by the given identifier -// can be extended. Client connection MUST BE correctly established in advance. -func IsACLExtendable(ctx context.Context, c *client.Client, cnr cid.ID) (bool, error) { - prm := GetContainerPrm{ - Client: c, - ClientParams: client.PrmContainerGet{ - ContainerID: &cnr, - }, - } - - res, err := GetContainer(ctx, prm) - if err != nil { - return false, fmt.Errorf("get container from the FrostFS: %w", err) - } - - return res.Container().BasicACL().Extendable(), nil -} - -// DeleteContainerPrm groups parameters of DeleteContainerPrm operation. -type DeleteContainerPrm struct { - Client *client.Client - ClientParams client.PrmContainerDelete -} - -// DeleteContainerRes groups the resulting values of DeleteContainer operation. -type DeleteContainerRes struct{} - -// DeleteContainer sends a request to remove a container from FrostFS by ID. -// -// Operation is asynchronous and not guaranteed even in the absence of errors. -// The required time is also not predictable. -// -// Success can be verified by reading by identifier. -// -// Returns any error which prevented the operation from completing correctly in error return. -func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteContainerRes, err error) { - _, err = prm.Client.ContainerDelete(ctx, prm.ClientParams) - - return -} - -// NetworkInfoPrm groups parameters of NetworkInfo operation. -type NetworkInfoPrm struct { - Client *client.Client - ClientParams client.PrmNetworkInfo -} - -// NetworkInfoRes groups the resulting values of NetworkInfo operation. -type NetworkInfoRes struct { - cliRes *client.ResNetworkInfo -} - -// NetworkInfo returns structured information about the FrostFS network. -func (x NetworkInfoRes) NetworkInfo() netmap.NetworkInfo { - return x.cliRes.Info() -} - -// NetworkInfo reads information about the FrostFS network. -// -// Returns any error which prevented the operation from completing correctly in error return. -func NetworkInfo(ctx context.Context, prm NetworkInfoPrm) (res NetworkInfoRes, err error) { - res.cliRes, err = prm.Client.NetworkInfo(ctx, prm.ClientParams) - - return -} - -// NodeInfoPrm groups parameters of NodeInfo operation. -type NodeInfoPrm struct { - Client *client.Client - ClientParams client.PrmEndpointInfo -} - -// NodeInfoRes groups the resulting values of NodeInfo operation. -type NodeInfoRes struct { - cliRes *client.ResEndpointInfo -} - -// NodeInfo returns information about the node from netmap. -func (x NodeInfoRes) NodeInfo() netmap.NodeInfo { - return x.cliRes.NodeInfo() -} - -// LatestVersion returns the latest FrostFS API version in use. -func (x NodeInfoRes) LatestVersion() version.Version { - return x.cliRes.LatestVersion() -} - -// NodeInfo requests information about the remote server from FrostFS netmap. -// -// Returns any error which prevented the operation from completing correctly in error return. -func NodeInfo(ctx context.Context, prm NodeInfoPrm) (res NodeInfoRes, err error) { - res.cliRes, err = prm.Client.EndpointInfo(ctx, prm.ClientParams) - - return -} - -// NetMapSnapshotPrm groups parameters of NetMapSnapshot operation. -type NetMapSnapshotPrm struct { - commonPrm -} - -// NetMapSnapshotRes groups the resulting values of NetMapSnapshot operation. 
-type NetMapSnapshotRes struct { - cliRes *client.ResNetMapSnapshot -} - -// NetMap returns current local snapshot of the FrostFS network map. -func (x NetMapSnapshotRes) NetMap() netmap.NetMap { - return x.cliRes.NetMap() -} - -// NetMapSnapshot requests current network view of the remote server. -// -// Returns any error which prevented the operation from completing correctly in error return. -func NetMapSnapshot(ctx context.Context, prm NetMapSnapshotPrm) (res NetMapSnapshotRes, err error) { - res.cliRes, err = prm.cli.NetMapSnapshot(ctx, client.PrmNetMapSnapshot{}) - return -} - -// CreateSessionPrm groups parameters of CreateSession operation. -type CreateSessionPrm struct { - commonPrm - client.PrmSessionCreate -} - -// CreateSessionRes groups the resulting values of CreateSession operation. -type CreateSessionRes struct { - cliRes *client.ResSessionCreate -} - -// ID returns session identifier. -func (x CreateSessionRes) ID() []byte { - return x.cliRes.ID() -} - -// SessionKey returns public session key in a binary format. -func (x CreateSessionRes) SessionKey() []byte { - return x.cliRes.PublicKey() -} - -// CreateSession opens a new unlimited session with the remote node. -// -// Returns any error which prevented the operation from completing correctly in error return. -func CreateSession(ctx context.Context, prm CreateSessionPrm) (res CreateSessionRes, err error) { - res.cliRes, err = prm.cli.SessionCreate(ctx, prm.PrmSessionCreate) - - return -} - -// PutObjectPrm groups parameters of PutObject operation. -type PutObjectPrm struct { - commonObjectPrm - - copyNum []uint32 - - hdr *objectSDK.Object - - rdr io.Reader - - headerCallback func() - - prepareLocally bool -} - -// SetHeader sets object header. -func (x *PutObjectPrm) SetHeader(hdr *objectSDK.Object) { - x.hdr = hdr -} - -// SetPayloadReader sets reader of the object payload. -func (x *PutObjectPrm) SetPayloadReader(rdr io.Reader) { - x.rdr = rdr -} - -// SetHeaderCallback sets callback which is called on the object after the header is received -// but before the payload is written. -func (x *PutObjectPrm) SetHeaderCallback(f func()) { - x.headerCallback = f -} - -// SetCopiesNumberByVectors sets ordered list of minimal required object copies numbers -// per placement vector. -func (x *PutObjectPrm) SetCopiesNumberByVectors(copiesNumbers []uint32) { - x.copyNum = copiesNumbers -} - -// PrepareLocally generate object header on the client side. -// For big object - split locally too. -func (x *PutObjectPrm) PrepareLocally() { - x.prepareLocally = true -} - -func (x *PutObjectPrm) convertToSDKPrm(ctx context.Context) (client.PrmObjectPutInit, error) { - putPrm := client.PrmObjectPutInit{ - XHeaders: x.xHeaders, - BearerToken: x.bearerToken, - Local: x.local, - CopiesNumber: x.copyNum, - } - - if x.prepareLocally { - res, err := x.cli.NetworkInfo(ctx, client.PrmNetworkInfo{}) - if err != nil { - return client.PrmObjectPutInit{}, err - } - putPrm.MaxSize = res.Info().MaxObjectSize() - putPrm.EpochSource = epochSource(res.Info().CurrentEpoch()) - putPrm.WithoutHomomorphHash = res.Info().HomomorphicHashingDisabled() - } else { - putPrm.Session = x.sessionToken - } - return putPrm, nil -} - -// PutObjectRes groups the resulting values of PutObject operation. -type PutObjectRes struct { - id oid.ID -} - -// ID returns identifier of the created object. 
-func (x PutObjectRes) ID() oid.ID { - return x.id -} - -type epochSource uint64 - -func (s epochSource) CurrentEpoch() uint64 { - return uint64(s) -} - -// PutObject saves the object in FrostFS network. -// -// Returns any error which prevented the operation from completing correctly in error return. -func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { - sdkPrm, err := prm.convertToSDKPrm(ctx) - if err != nil { - return nil, fmt.Errorf("unable to create parameters of object put operation: %w", err) - } - wrt, err := prm.cli.ObjectPutInit(ctx, sdkPrm) - if err != nil { - return nil, fmt.Errorf("init object writing: %w", err) - } - - if wrt.WriteHeader(ctx, *prm.hdr) { - if prm.headerCallback != nil { - prm.headerCallback() - } - - sz := prm.hdr.PayloadSize() - - if data := prm.hdr.Payload(); len(data) > 0 { - if prm.rdr != nil { - prm.rdr = io.MultiReader(bytes.NewReader(data), prm.rdr) - } else { - prm.rdr = bytes.NewReader(data) - sz = uint64(len(data)) - } - } - - if prm.rdr != nil { - const defaultBufferSizePut = 3 << 20 // Maximum chunk size is 3 MiB in the SDK. - - if sz == 0 || sz > defaultBufferSizePut { - sz = defaultBufferSizePut - } - - buf := make([]byte, sz) - - var n int - - for { - n, err = prm.rdr.Read(buf) - if n > 0 { - if !wrt.WritePayloadChunk(ctx, buf[:n]) { - break - } - - continue - } - - if errors.Is(err, io.EOF) { - break - } - - return nil, fmt.Errorf("read payload: %w", err) - } - } - } - - cliRes, err := wrt.Close(ctx) - if err != nil { // here err already carries both status and client errors - return nil, fmt.Errorf("client failure: %w", err) - } - - return &PutObjectRes{ - id: cliRes.StoredObjectID(), - }, nil -} - -// DeleteObjectPrm groups parameters of DeleteObject operation. -type DeleteObjectPrm struct { - commonObjectPrm - objectAddressPrm -} - -// DeleteObjectRes groups the resulting values of DeleteObject operation. -type DeleteObjectRes struct { - tomb oid.ID -} - -// Tombstone returns the ID of the created object with tombstone. -func (x DeleteObjectRes) Tombstone() oid.ID { - return x.tomb -} - -// DeleteObject marks an object to be removed from FrostFS through tombstone placement. -// -// Returns any error which prevented the operation from completing correctly in error return. -func DeleteObject(ctx context.Context, prm DeleteObjectPrm) (*DeleteObjectRes, error) { - cnr := prm.objAddr.Container() - obj := prm.objAddr.Object() - - delPrm := client.PrmObjectDelete{ - XHeaders: prm.xHeaders, - ContainerID: &cnr, - ObjectID: &obj, - Session: prm.sessionToken, - BearerToken: prm.bearerToken, - } - - cliRes, err := prm.cli.ObjectDelete(ctx, delPrm) - if err != nil { - return nil, fmt.Errorf("remove object via client: %w", err) - } - - return &DeleteObjectRes{ - tomb: cliRes.Tombstone(), - }, nil -} - -// GetObjectPrm groups parameters of GetObject operation. -type GetObjectPrm struct { - commonObjectPrm - objectAddressPrm - rawPrm - payloadWriterPrm - headerCallback func(*objectSDK.Object) -} - -// SetHeaderCallback sets callback which is called on the object after the header is received -// but before the payload is written. -func (p *GetObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) { - p.headerCallback = f -} - -// GetObjectRes groups the resulting values of GetObject operation. -type GetObjectRes struct { - hdr *objectSDK.Object -} - -// Header returns the header of the request object. -func (x GetObjectRes) Header() *objectSDK.Object { - return x.hdr -} - -// GetObject reads an object by address. 
-// -// Interrupts on any writer error. If successful, payload is written to the writer. -// -// Returns any error which prevented the operation from completing correctly in error return. -// For raw reading, returns *object.SplitInfoError error if object is virtual. -func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) { - cnr := prm.objAddr.Container() - obj := prm.objAddr.Object() - - getPrm := client.PrmObjectGet{ - XHeaders: prm.xHeaders, - BearerToken: prm.bearerToken, - Session: prm.sessionToken, - Raw: prm.raw, - Local: prm.local, - ContainerID: &cnr, - ObjectID: &obj, - } - - rdr, err := prm.cli.ObjectGetInit(ctx, getPrm) - if err != nil { - return nil, fmt.Errorf("init object reading on client: %w", err) - } - - var hdr objectSDK.Object - - if !rdr.ReadHeader(&hdr) { - _, err = rdr.Close() - return nil, fmt.Errorf("read object header: %w", err) - } - if prm.headerCallback != nil { - prm.headerCallback(&hdr) - } - - _, err = io.Copy(prm.wrt, rdr) - if err != nil { - return nil, fmt.Errorf("copy payload: %w", err) - } - - return &GetObjectRes{ - hdr: &hdr, - }, nil -} - -// HeadObjectPrm groups parameters of HeadObject operation. -type HeadObjectPrm struct { - commonObjectPrm - objectAddressPrm - rawPrm -} - -// HeadObjectRes groups the resulting values of HeadObject operation. -type HeadObjectRes struct { - hdr *objectSDK.Object -} - -// Header returns the requested object header. -func (x HeadObjectRes) Header() *objectSDK.Object { - return x.hdr -} - -// HeadObject reads an object header by address. -// -// Returns any error which prevented the operation from completing correctly in error return. -// For raw reading, returns *object.SplitInfoError error if object is virtual. -func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) { - cnr := prm.objAddr.Container() - obj := prm.objAddr.Object() - - headPrm := client.PrmObjectHead{ - XHeaders: prm.xHeaders, - BearerToken: prm.bearerToken, - Session: prm.sessionToken, - Raw: prm.raw, - Local: prm.local, - ContainerID: &cnr, - ObjectID: &obj, - } - - res, err := prm.cli.ObjectHead(ctx, headPrm) - if err != nil { - return nil, fmt.Errorf("read object header via client: %w", err) - } - - var hdr objectSDK.Object - - if !res.ReadHeader(&hdr) { - return nil, errMissingHeaderInResponse - } - - return &HeadObjectRes{ - hdr: &hdr, - }, nil -} - -// SearchObjectsPrm groups parameters of SearchObjects operation. -type SearchObjectsPrm struct { - commonObjectPrm - containerIDPrm - - filters objectSDK.SearchFilters -} - -// SetFilters sets search filters. -func (x *SearchObjectsPrm) SetFilters(filters objectSDK.SearchFilters) { - x.filters = filters -} - -// SearchObjectsRes groups the resulting values of SearchObjects operation. -type SearchObjectsRes struct { - ids []oid.ID -} - -// IDList returns identifiers of the matched objects. -func (x SearchObjectsRes) IDList() []oid.ID { - return x.ids -} - -// SearchObjects selects objects from the container which match the filters. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) { - cliPrm := client.PrmObjectSearch{ - XHeaders: prm.xHeaders, - Local: prm.local, - BearerToken: prm.bearerToken, - Session: prm.sessionToken, - ContainerID: &prm.cnrID, - Filters: prm.filters, - } - - rdr, err := prm.cli.ObjectSearchInit(ctx, cliPrm) - if err != nil { - return nil, fmt.Errorf("init object search: %w", err) - } - - buf := make([]oid.ID, 10) - var list []oid.ID - var n int - var ok bool - - for { - n, ok = rdr.Read(buf) - list = append(list, buf[:n]...) - if !ok { - break - } - } - - _, err = rdr.Close() - if err != nil { - return nil, fmt.Errorf("read object list: %w", err) - } - - slices.SortFunc(list, oid.ID.Cmp) - - return &SearchObjectsRes{ - ids: list, - }, nil -} - -// HashPayloadRangesPrm groups parameters of HashPayloadRanges operation. -type HashPayloadRangesPrm struct { - commonObjectPrm - objectAddressPrm - - tz bool - - rngs []objectSDK.Range - - salt []byte -} - -// TZ sets flag to request Tillich-Zemor hashes. -func (x *HashPayloadRangesPrm) TZ() { - x.tz = true -} - -// SetRanges sets a list of payload ranges to hash. -func (x *HashPayloadRangesPrm) SetRanges(rngs []objectSDK.Range) { - x.rngs = rngs -} - -// SetSalt sets data for each range to be XOR'ed with. -func (x *HashPayloadRangesPrm) SetSalt(salt []byte) { - x.salt = salt -} - -// HashPayloadRangesRes groups the resulting values of HashPayloadRanges operation. -type HashPayloadRangesRes struct { - cliRes *client.ResObjectHash -} - -// HashList returns a list of hashes of the payload ranges keeping order. -func (x HashPayloadRangesRes) HashList() [][]byte { - return x.cliRes.Checksums() -} - -// HashPayloadRanges requests hashes (by default SHA256) of the object payload ranges. -// -// Returns any error which prevented the operation from completing correctly in error return. -// Returns an error if number of received hashes differs with the number of requested ranges. -func HashPayloadRanges(ctx context.Context, prm HashPayloadRangesPrm) (*HashPayloadRangesRes, error) { - cs := checksum.SHA256 - if prm.tz { - cs = checksum.TZ - } - - cnr := prm.objAddr.Container() - obj := prm.objAddr.Object() - cliPrm := client.PrmObjectHash{ - ContainerID: &cnr, - ObjectID: &obj, - Local: prm.local, - Salt: prm.salt, - Ranges: prm.rngs, - ChecksumType: cs, - Session: prm.sessionToken, - BearerToken: prm.bearerToken, - XHeaders: prm.xHeaders, - } - - res, err := prm.cli.ObjectHash(ctx, cliPrm) - if err != nil { - return nil, fmt.Errorf("read payload hashes via client: %w", err) - } - - return &HashPayloadRangesRes{ - cliRes: res, - }, nil -} - -// PayloadRangePrm groups parameters of PayloadRange operation. -type PayloadRangePrm struct { - commonObjectPrm - objectAddressPrm - rawPrm - payloadWriterPrm - - rng *objectSDK.Range -} - -// SetRange sets payload range to read. -func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) { - x.rng = rng -} - -// PayloadRangeRes groups the resulting values of PayloadRange operation. -type PayloadRangeRes struct{} - -// PayloadRange reads object payload range from FrostFS and writes it to the specified writer. -// -// Interrupts on any writer error. -// -// Returns any error which prevented the operation from completing correctly in error return. -// For raw reading, returns *object.SplitInfoError error if object is virtual. 
-func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) { - cnr := prm.objAddr.Container() - obj := prm.objAddr.Object() - - rangePrm := client.PrmObjectRange{ - XHeaders: prm.xHeaders, - BearerToken: prm.bearerToken, - Session: prm.sessionToken, - Raw: prm.raw, - Local: prm.local, - ContainerID: &cnr, - ObjectID: &obj, - Offset: prm.rng.GetOffset(), - Length: prm.rng.GetLength(), - } - - rdr, err := prm.cli.ObjectRangeInit(ctx, rangePrm) - if err != nil { - return nil, fmt.Errorf("init payload reading: %w", err) - } - - _, err = io.Copy(prm.wrt, rdr) - if err != nil { - return nil, fmt.Errorf("copy payload: %w", err) - } - - return new(PayloadRangeRes), nil -} - -// SyncContainerPrm groups parameters of SyncContainerSettings operation. -type SyncContainerPrm struct { - commonPrm - c *containerSDK.Container -} - -// SetContainer sets a container that is required to be synced. -func (s *SyncContainerPrm) SetContainer(c *containerSDK.Container) { - s.c = c -} - -// SyncContainerRes groups resulting values of SyncContainerSettings -// operation. -type SyncContainerRes struct{} - -// SyncContainerSettings reads global network config from FrostFS and -// syncs container settings with it. -// -// Interrupts on any writer error. -// -// Panics if a container passed as a parameter is nil. -func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncContainerRes, error) { - if prm.c == nil { - panic("sync container settings with the network: nil container") - } - - err := client.SyncContainerWithNetwork(ctx, prm.c, prm.cli) - if err != nil { - return nil, err - } - - return new(SyncContainerRes), nil -} - -// PatchObjectPrm groups parameters of PatchObject operation. -type PatchObjectPrm struct { - commonObjectPrm - objectAddressPrm - - NewAttributes []objectSDK.Attribute - - ReplaceAttribute bool - - NewSplitHeader *objectSDK.SplitHeader - - PayloadPatches []PayloadPatch -} - -type PayloadPatch struct { - Range objectSDK.Range - - PayloadPath string -} - -type PatchRes struct { - OID oid.ID -} - -func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { - patchPrm := client.PrmObjectPatch{ - XHeaders: prm.xHeaders, - BearerToken: prm.bearerToken, - Session: prm.sessionToken, - Address: prm.objAddr, - } - - slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int { - return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset()) - }) - - patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm) - if err != nil { - return nil, fmt.Errorf("init payload reading: %w", err) - } - - if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ - NewSplitHeader: prm.NewSplitHeader, - NewAttributes: prm.NewAttributes, - ReplaceAttributes: prm.ReplaceAttribute, - }) { - for _, pp := range prm.PayloadPatches { - payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) - if err != nil { - return nil, err - } - applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile) - _ = payloadFile.Close() - if !applied { - break - } - } - } - - res, err := patcher.Close(ctx) - if err != nil { - return nil, err - } - return &PatchRes{ - OID: res.ObjectID(), - }, nil -} diff --git a/cmd/frostfs-cli/internal/client/doc.go b/cmd/frostfs-cli/internal/client/doc.go deleted file mode 100644 index 7c6f48fc1..000000000 --- a/cmd/frostfs-cli/internal/client/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package internal provides functionality for FrostFS CLI application -// communication with FrostFS network. 
-// -// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK -// Go API client. However, although it encapsulates a useful piece of business -// logic (e.g. the signature mechanism), the FrostFS CLI application does not -// fully use the client's flexible interface. -// -// In this regard, this package provides functions over base API client -// necessary for the application. This allows you to concentrate the entire -// spectrum of the client's use in one place (this will be convenient both when -// updating the base client and for evaluating the UX of SDK library). So it is -// expected that all application packages will be limited to this package for -// the development of functionality requiring FrostFS API communication. -package internal diff --git a/cmd/frostfs-cli/internal/client/prm.go b/cmd/frostfs-cli/internal/client/prm.go deleted file mode 100644 index e5c1b41c9..000000000 --- a/cmd/frostfs-cli/internal/client/prm.go +++ /dev/null @@ -1,92 +0,0 @@ -package internal - -import ( - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -// here are small structures with public setters to share between parameter structures - -type commonPrm struct { - cli *client.Client -} - -// SetClient sets the base client for FrostFS API communication. -func (x *commonPrm) SetClient(cli *client.Client) { - x.cli = cli -} - -type containerIDPrm struct { - cnrID cid.ID -} - -// SetContainerID sets the container identifier. -func (x *containerIDPrm) SetContainerID(id cid.ID) { - x.cnrID = id -} - -type bearerTokenPrm struct { - bearerToken *bearer.Token -} - -// SetBearerToken sets the bearer token to be attached to the request. -func (x *bearerTokenPrm) SetBearerToken(tok *bearer.Token) { - x.bearerToken = tok -} - -type objectAddressPrm struct { - objAddr oid.Address -} - -func (x *objectAddressPrm) SetAddress(addr oid.Address) { - x.objAddr = addr -} - -type rawPrm struct { - raw bool -} - -// SetRawFlag sets flag of raw request. -func (x *rawPrm) SetRawFlag(raw bool) { - x.raw = raw -} - -type payloadWriterPrm struct { - wrt io.Writer -} - -// SetPayloadWriter sets the writer of the object payload. -func (x *payloadWriterPrm) SetPayloadWriter(wrt io.Writer) { - x.wrt = wrt -} - -type commonObjectPrm struct { - commonPrm - bearerTokenPrm - - sessionToken *session.Object - - local bool - - xHeaders []string -} - -// SetTTL sets request TTL value. -func (x *commonObjectPrm) SetTTL(ttl uint32) { - x.local = ttl < 2 -} - -// SetXHeaders sets request X-Headers. -func (x *commonObjectPrm) SetXHeaders(hs []string) { - x.xHeaders = hs -} - -// SetSessionToken sets the token of the session within which the request should be sent. 
-func (x *commonObjectPrm) SetSessionToken(tok *session.Object) {
-	x.sessionToken = tok
-}
diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go
deleted file mode 100644
index 1eadfa2e1..000000000
--- a/cmd/frostfs-cli/internal/client/sdk.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package internal
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rand"
-	"errors"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-	"google.golang.org/grpc"
-)
-
-var errInvalidEndpoint = errors.New("provided RPC endpoint is incorrect")
-
-// GetSDKClientByFlag returns default frostfs-sdk-go client using the specified flag for the address.
-// On error, outputs to stderr of cmd and exits with non-zero code.
-func GetSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag string) *client.Client {
-	cli, err := getSDKClientByFlag(cmd, key, endpointFlag)
-	if err != nil {
-		commonCmd.ExitOnErr(cmd, "can't create API client: %w", err)
-	}
-	return cli
-}
-
-func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag string) (*client.Client, error) {
-	var addr network.Address
-
-	if len(viper.GetString(endpointFlag)) == 0 {
-		return nil, fmt.Errorf("%s is not defined", endpointFlag)
-	}
-
-	err := addr.FromString(viper.GetString(endpointFlag))
-	if err != nil {
-		return nil, fmt.Errorf("%v: %w", errInvalidEndpoint, err)
-	}
-	return GetSDKClient(cmd.Context(), cmd, key, addr)
-}
-
-// GetSDKClient returns default frostfs-sdk-go client.
-func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) {
-	var c client.Client
-
-	prmInit := client.PrmInit{
-		Key: *key,
-	}
-
-	prmDial := client.PrmDial{
-		Endpoint: addr.URIAddr(),
-		GRPCDialOptions: []grpc.DialOption{
-			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
-			grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
-			grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
-		},
-	}
-	if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
-		// In CLI we can only set a timeout for the whole operation.
-		// By also setting stream timeout we ensure that no operation hangs
-		// for too long.
-		prmDial.DialTimeout = timeout
-		prmDial.StreamTimeout = timeout
-
-		common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
-	}
-
-	c.Init(prmInit)
-
-	if err := c.Dial(ctx, prmDial); err != nil {
-		return nil, fmt.Errorf("can't init SDK client: %w", err)
-	}
-
-	return &c, nil
-}
-
-// GetCurrentEpoch returns the current epoch.
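In command code, the two helpers above put a usable connection one line away. A sketch, assuming the command registered the common flags via commonflags.Init and bound them to viper beforehand:

var exampleCmd = &cobra.Command{
	Use: "example",
	Run: func(cmd *cobra.Command, _ []string) {
		pk := key.GetOrGenerate(cmd)                                       // honours --generate-key
		cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) // exits on dial failure
		_ = cli // issue FrostFS API calls through cli
	},
}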
-func GetCurrentEpoch(ctx context.Context, cmd *cobra.Command, endpoint string) (uint64, error) { - var addr network.Address - - if err := addr.FromString(endpoint); err != nil { - return 0, fmt.Errorf("can't parse RPC endpoint: %w", err) - } - - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return 0, fmt.Errorf("can't generate key to sign query: %w", err) - } - - c, err := GetSDKClient(ctx, cmd, key, addr) - if err != nil { - return 0, err - } - - ni, err := c.NetworkInfo(ctx, client.PrmNetworkInfo{}) - if err != nil { - return 0, err - } - - return ni.Info().CurrentEpoch(), nil -} diff --git a/cmd/frostfs-cli/internal/common/eacl.go b/cmd/frostfs-cli/internal/common/eacl.go deleted file mode 100644 index 0a623b0e8..000000000 --- a/cmd/frostfs-cli/internal/common/eacl.go +++ /dev/null @@ -1,50 +0,0 @@ -package common - -import ( - "errors" - "os" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/spf13/cobra" -) - -var errUnsupportedEACLFormat = errors.New("unsupported eACL format") - -// ReadEACL reads extended ACL table from eaclPath. -func ReadEACL(cmd *cobra.Command, eaclPath string) *eacl.Table { - _, err := os.Stat(eaclPath) // check if `eaclPath` is an existing file - if err != nil { - commonCmd.ExitOnErr(cmd, "", errors.New("incorrect path to file with EACL")) - } - - PrintVerbose(cmd, "Reading EACL from file: %s", eaclPath) - - data, err := os.ReadFile(eaclPath) - commonCmd.ExitOnErr(cmd, "can't read file with EACL: %w", err) - - table := eacl.NewTable() - - if err = table.UnmarshalJSON(data); err == nil { - validateAndFixEACLVersion(table) - PrintVerbose(cmd, "Parsed JSON encoded EACL table") - return table - } - - if err = table.Unmarshal(data); err == nil { - validateAndFixEACLVersion(table) - PrintVerbose(cmd, "Parsed binary encoded EACL table") - return table - } - - commonCmd.ExitOnErr(cmd, "", errUnsupportedEACLFormat) - return nil -} - -func validateAndFixEACLVersion(table *eacl.Table) { - if !version.IsValid(table.Version()) { - table.SetVersion(versionSDK.Current()) - } -} diff --git a/cmd/frostfs-cli/internal/common/epoch.go b/cmd/frostfs-cli/internal/common/epoch.go deleted file mode 100644 index 117fb7c58..000000000 --- a/cmd/frostfs-cli/internal/common/epoch.go +++ /dev/null @@ -1,28 +0,0 @@ -package common - -import ( - "fmt" - "strconv" - - "github.com/spf13/cobra" -) - -// ParseEpoch parses epoch argument. Second return value is true if -// the specified epoch is relative, and false otherwise. -func ParseEpoch(cmd *cobra.Command, flag string) (uint64, bool, error) { - s, _ := cmd.Flags().GetString(flag) - if len(s) == 0 { - return 0, false, nil - } - - relative := s[0] == '+' - if relative { - s = s[1:] - } - - epoch, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, relative, fmt.Errorf("can't parse epoch for %s argument: %w", flag, err) - } - return epoch, relative, nil -} diff --git a/cmd/frostfs-cli/internal/common/json.go b/cmd/frostfs-cli/internal/common/json.go deleted file mode 100644 index 02b7db60a..000000000 --- a/cmd/frostfs-cli/internal/common/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package common - -import ( - "bytes" - "encoding/json" - - "github.com/spf13/cobra" -) - -// PrettyPrintJSON prints m as an indented JSON to the cmd output. 
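Since PrettyPrintJSON accepts any json.Marshaler, it composes with the other helpers in this package. A sketch, written as if inside the same common package and using ReadEACL from eacl.go above:

func examplePrintEACL(cmd *cobra.Command, path string) {
	table := ReadEACL(cmd, path) // exits the process on read/parse failure
	PrettyPrintJSON(cmd, table, "eACL table")
}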
-func PrettyPrintJSON(cmd *cobra.Command, m json.Marshaler, entity string) { - data, err := m.MarshalJSON() - if err != nil { - PrintVerbose(cmd, "Can't convert %s to json: %w", entity, err) - return - } - buf := new(bytes.Buffer) - if err := json.Indent(buf, data, "", " "); err != nil { - PrintVerbose(cmd, "Can't pretty print json: %w", err) - return - } - cmd.Println(buf) -} diff --git a/cmd/frostfs-cli/internal/common/token.go b/cmd/frostfs-cli/internal/common/token.go deleted file mode 100644 index 102a2d59e..000000000 --- a/cmd/frostfs-cli/internal/common/token.go +++ /dev/null @@ -1,67 +0,0 @@ -package common - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "github.com/spf13/cobra" -) - -// ReadBearerToken reads bearer token from the path provided in a specified flag. -func ReadBearerToken(cmd *cobra.Command, flagname string) *bearer.Token { - path, err := cmd.Flags().GetString(flagname) - commonCmd.ExitOnErr(cmd, "", err) - - if len(path) == 0 { - return nil - } - - PrintVerbose(cmd, "Reading bearer token from file [%s]...", path) - - var tok bearer.Token - - err = ReadBinaryOrJSON(cmd, &tok, path) - commonCmd.ExitOnErr(cmd, "invalid bearer token: %v", err) - - return &tok -} - -// BinaryOrJSON is an interface of entities which provide json.Unmarshaler -// and FrostFS binary decoder. -type BinaryOrJSON interface { - Unmarshal([]byte) error - json.Unmarshaler -} - -// ReadBinaryOrJSON reads file data using provided path and decodes -// BinaryOrJSON from the data. -func ReadBinaryOrJSON(cmd *cobra.Command, dst BinaryOrJSON, fPath string) error { - PrintVerbose(cmd, "Reading file [%s]...", fPath) - - // try to read session token from file - data, err := os.ReadFile(fPath) - if err != nil { - return fmt.Errorf("read file <%s>: %w", fPath, err) - } - - PrintVerbose(cmd, "Trying to decode binary...") - - err = dst.Unmarshal(data) - if err != nil { - PrintVerbose(cmd, "Failed to decode binary: %v", err) - - PrintVerbose(cmd, "Trying to decode JSON...") - - err = dst.UnmarshalJSON(data) - if err != nil { - PrintVerbose(cmd, "Failed to decode JSON: %v", err) - return errors.New("invalid format") - } - } - - return nil -} diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go deleted file mode 100644 index 10863ed1e..000000000 --- a/cmd/frostfs-cli/internal/common/tracing.go +++ /dev/null @@ -1,58 +0,0 @@ -package common - -import ( - "context" - "slices" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "github.com/spf13/cobra" - "go.opentelemetry.io/otel/trace" -) - -type spanKey struct{} - -// StopClientCommandSpan stops tracing span for the command and prints trace ID on the standard output. -func StopClientCommandSpan(cmd *cobra.Command, _ []string) { - span, ok := cmd.Context().Value(spanKey{}).(trace.Span) - if !ok { - return - } - - span.End() - - // Noop provider cannot fail on flush. - _ = tracing.Shutdown(cmd.Context()) - - cmd.PrintErrf("Trace ID: %s\n", span.SpanContext().TraceID()) -} - -// StartClientCommandSpan starts tracing span for the command. 
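StopClientCommandSpan and its counterpart below are meant to bracket a command's execution. One way the wiring can look; the real hook-up lives in the CLI's root command, outside this diff:

var exampleTracedCmd = &cobra.Command{
	Use: "get",
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		common.StartClientCommandSpan(cmd) // no-op unless --trace is set
	},
	Run:               func(cmd *cobra.Command, _ []string) { /* request work */ },
	PersistentPostRun: common.StopClientCommandSpan, // ends the span, prints the trace ID
}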
-func StartClientCommandSpan(cmd *cobra.Command) {
-	enableTracing, err := cmd.Flags().GetBool(commonflags.TracingFlag)
-	if err != nil || !enableTracing {
-		return
-	}
-
-	_, err = tracing.Setup(cmd.Context(), tracing.Config{
-		Enabled:  true,
-		Exporter: tracing.NoOpExporter,
-		Service:  "frostfs-cli",
-		Version:  misc.Version,
-	})
-	commonCmd.ExitOnErr(cmd, "init tracing: %w", err)
-
-	var components []string
-	for c := cmd; c != nil; c = c.Parent() {
-		components = append(components, c.Name())
-	}
-	slices.Reverse(components)
-
-	operation := strings.Join(components, ".")
-	ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation)
-	ctx = context.WithValue(ctx, spanKey{}, span)
-	cmd.SetContext(ctx)
-}
diff --git a/cmd/frostfs-cli/internal/common/verbose.go b/cmd/frostfs-cli/internal/common/verbose.go
deleted file mode 100644
index 2f95626ff..000000000
--- a/cmd/frostfs-cli/internal/common/verbose.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package common
-
-import (
-	"encoding/hex"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// PrintVerbose prints to the stdout if the commonflags.Verbose flag is on.
-func PrintVerbose(cmd *cobra.Command, format string, a ...any) {
-	if viper.GetBool(commonflags.Verbose) {
-		cmd.Printf(format+"\n", a...)
-	}
-}
-
-// PrettyPrintUnixTime interprets s as a unix timestamp and prints it as
-// a date. If s is invalid, "malformed" is returned.
-func PrettyPrintUnixTime(s string) string {
-	unixTime, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		return "malformed"
-	}
-
-	timestamp := time.Unix(unixTime, 0)
-
-	return timestamp.String()
-}
-
-// PrintChecksum prints checksum.
-func PrintChecksum(cmd *cobra.Command, name string, recv func() (checksum.Checksum, bool)) {
-	var strVal string
-
-	cs, csSet := recv()
-	if csSet {
-		strVal = hex.EncodeToString(cs.Value())
-	} else {
-		strVal = "<empty>"
-	}
-
-	cmd.Printf("%s: %s\n", name, strVal)
-}
diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go
deleted file mode 100644
index 6ed21e107..000000000
--- a/cmd/frostfs-cli/internal/commonflags/api.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package commonflags
-
-import (
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-const (
-	TTL          = "ttl"
-	TTLShorthand = ""
-	TTLDefault   = 2
-	TTLUsage     = "The maximum number of intermediate nodes in the request route"
-
-	XHeadersKey       = "xhdr"
-	XHeadersShorthand = "x"
-	XHeadersUsage     = "Request X-Headers in form of Key=Value"
-)
-
-// InitAPI inits common flags for storage node services.
-func InitAPI(cmd *cobra.Command) {
-	ff := cmd.Flags()
-
-	ff.StringSliceP(XHeadersKey, XHeadersShorthand, []string{}, XHeadersUsage)
-	ff.Uint32P(TTL, TTLShorthand, TTLDefault, TTLUsage)
-}
-
-// BindAPI binds API flags of storage node services to the viper.
-func BindAPI(cmd *cobra.Command) {
-	ff := cmd.Flags()
-
-	_ = viper.BindPFlag(TTL, ff.Lookup(TTL))
-	_ = viper.BindPFlag(XHeadersKey, ff.Lookup(XHeadersKey))
-}
diff --git a/cmd/frostfs-cli/internal/commonflags/expiration.go b/cmd/frostfs-cli/internal/commonflags/expiration.go
deleted file mode 100644
index b266b47c8..000000000
--- a/cmd/frostfs-cli/internal/commonflags/expiration.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package commonflags
-
-const (
-	// ExpireAt is a flag for setting the last epoch of an object or a token.
-	ExpireAt = "expire-at"
-	// Lifetime is a flag for setting the lifetime of an object or a token,
-	// starting from the current epoch.
-	Lifetime = "lifetime"
-)
diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go
deleted file mode 100644
index fad1f6183..000000000
--- a/cmd/frostfs-cli/internal/commonflags/flags.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package commonflags
-
-import (
-	"time"
-
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-// Common CLI flag keys, shorthands, default
-// values and their usage descriptions.
-const (
-	GenerateKey          = "generate-key"
-	GenerateKeyShorthand = "g"
-	GenerateKeyDefault   = false
-	GenerateKeyUsage     = "Generate new private key"
-
-	WalletPath          = "wallet"
-	WalletPathShorthand = "w"
-	WalletPathDefault   = ""
-	WalletPathUsage     = "Path to the wallet or binary key"
-
-	Account          = "address"
-	AccountShorthand = ""
-	AccountDefault   = ""
-	AccountUsage     = "Address of wallet account"
-
-	RPC          = "rpc-endpoint"
-	RPCShorthand = "r"
-	RPCDefault   = ""
-	RPCUsage     = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
-
-	Timeout          = "timeout"
-	TimeoutShorthand = "t"
-	TimeoutDefault   = 15 * time.Second
-	TimeoutUsage     = "Timeout for an operation"
-
-	Verbose          = "verbose"
-	VerboseShorthand = "v"
-	VerboseUsage     = "Verbose output"
-
-	ForceFlag          = "force"
-	ForceFlagShorthand = "f"
-
-	CIDFlag      = "cid"
-	CIDFlagUsage = "Container ID."
-
-	OIDFlag      = "oid"
-	OIDFlagUsage = "Object ID."
-
-	TracingFlag      = "trace"
-	TracingFlagUsage = "Generate trace ID and print it."
-
-	AwaitFlag      = "await"
-	AwaitFlagUsage = "Wait for the operation to complete"
-
-	QuietFlag          = "quiet"
-	QuietFlagShorthand = "q"
-	QuietFlagUsage     = "Print nothing and exit with non-zero code on failure"
-)
-
-// Init adds common flags to the command:
-// - GenerateKey,
-// - WalletPath,
-// - Account,
-// - RPC,
-// - Tracing,
-// - Timeout.
-func Init(cmd *cobra.Command) {
-	InitWithoutRPC(cmd)
-
-	ff := cmd.Flags()
-	ff.StringP(RPC, RPCShorthand, RPCDefault, RPCUsage)
-	ff.Bool(TracingFlag, false, TracingFlagUsage)
-	ff.DurationP(Timeout, TimeoutShorthand, TimeoutDefault, TimeoutUsage)
-}
-
-// InitWithoutRPC is similar to Init but doesn't create the RPC flag.
-func InitWithoutRPC(cmd *cobra.Command) {
-	ff := cmd.Flags()
-
-	ff.BoolP(GenerateKey, GenerateKeyShorthand, GenerateKeyDefault, GenerateKeyUsage)
-	ff.StringP(WalletPath, WalletPathShorthand, WalletPathDefault, WalletPathUsage)
-	ff.StringP(Account, AccountShorthand, AccountDefault, AccountUsage)
-}
-
-// Bind binds common command flags to the viper.
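Init and Bind are two halves of one lifecycle: Init registers the flags when the command tree is built, and Bind is called from a PersistentPreRun so the values are readable through viper by the time Run executes. A sketch, written as if inside this commonflags package:

var exampleBoundCmd = &cobra.Command{
	Use: "example",
	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
		Bind(cmd) // copy flag values into viper for nested packages
	},
	Run: func(cmd *cobra.Command, _ []string) {
		endpoint := viper.GetString(RPC) // value of --rpc-endpoint
		_ = endpoint
	},
}

func init() {
	Init(exampleBoundCmd)
}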
-func Bind(cmd *cobra.Command) { - ff := cmd.Flags() - - _ = viper.BindPFlag(GenerateKey, ff.Lookup(GenerateKey)) - _ = viper.BindPFlag(WalletPath, ff.Lookup(WalletPath)) - _ = viper.BindPFlag(Account, ff.Lookup(Account)) - _ = viper.BindPFlag(RPC, ff.Lookup(RPC)) - _ = viper.BindPFlag(Timeout, ff.Lookup(Timeout)) -} diff --git a/cmd/frostfs-cli/internal/commonflags/json.go b/cmd/frostfs-cli/internal/commonflags/json.go deleted file mode 100644 index 33021f05e..000000000 --- a/cmd/frostfs-cli/internal/commonflags/json.go +++ /dev/null @@ -1,3 +0,0 @@ -package commonflags - -const JSON = "json" diff --git a/cmd/frostfs-cli/internal/commonflags/session.go b/cmd/frostfs-cli/internal/commonflags/session.go deleted file mode 100644 index ee61944ee..000000000 --- a/cmd/frostfs-cli/internal/commonflags/session.go +++ /dev/null @@ -1,19 +0,0 @@ -package commonflags - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -const SessionToken = "session" - -// InitSession registers SessionToken flag representing file path to the token of -// the session with the given name. Supports FrostFS-binary and JSON files. -func InitSession(cmd *cobra.Command, name string) { - cmd.Flags().String( - SessionToken, - "", - fmt.Sprintf("Filepath to a JSON- or binary-encoded token of the %s session", name), - ) -} diff --git a/cmd/frostfs-cli/internal/key/key_test.go b/cmd/frostfs-cli/internal/key/key_test.go deleted file mode 100644 index 37e4fd4ee..000000000 --- a/cmd/frostfs-cli/internal/key/key_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package key - -import ( - "bytes" - "io" - "os" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "golang.org/x/term" -) - -var testCmd = &cobra.Command{ - Use: "test", - Short: "test", - Run: func(cmd *cobra.Command, args []string) {}, -} - -func Test_getOrGenerate(t *testing.T) { - t.Cleanup(viper.Reset) - - dir := t.TempDir() - - wallPath := filepath.Join(dir, "wallet.json") - w, err := wallet.NewWallet(wallPath) - require.NoError(t, err) - - badWallPath := filepath.Join(dir, "bad_wallet.json") - require.NoError(t, os.WriteFile(badWallPath, []byte("bad content"), os.ModePerm)) - - acc1, err := wallet.NewAccount() - require.NoError(t, err) - require.NoError(t, acc1.Encrypt("pass", keys.NEP2ScryptParams())) - w.AddAccount(acc1) - - acc2, err := wallet.NewAccount() - require.NoError(t, err) - require.NoError(t, acc2.Encrypt("pass", keys.NEP2ScryptParams())) - acc2.Default = true - w.AddAccount(acc2) - require.NoError(t, w.Save()) - - keyPath := filepath.Join(dir, "binary.key") - rawKey, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, os.WriteFile(keyPath, rawKey.Bytes(), os.ModePerm)) - - wifKey, err := keys.NewPrivateKey() - require.NoError(t, err) - - nep2Key, err := keys.NewPrivateKey() - require.NoError(t, err) - nep2, err := keys.NEP2Encrypt(nep2Key, "pass", keys.NEP2ScryptParams()) - require.NoError(t, err) - - in := bytes.NewBuffer(nil) - input.Terminal = term.NewTerminal(input.ReadWriter{ - Reader: in, - Writer: io.Discard, - }, "") - - checkKeyError(t, filepath.Join(dir, "badfile"), ErrFs) - checkKeyError(t, badWallPath, ErrInvalidKey) - - t.Run("wallet", func(t *testing.T) { - checkKeyError(t, wallPath, ErrInvalidPassword) - - in.WriteString("invalid\r") - 
checkKeyError(t, wallPath, ErrInvalidPassword)
-
-		in.WriteString("pass\r")
-		checkKey(t, wallPath, acc2.PrivateKey()) // default account
-
-		viper.Set(commonflags.Account, acc1.Address)
-		in.WriteString("pass\r")
-		checkKey(t, wallPath, acc1.PrivateKey())
-
-		viper.Set(commonflags.Account, "not an address")
-		checkKeyError(t, wallPath, ErrInvalidAddress)
-
-		acc, err := wallet.NewAccount()
-		require.NoError(t, err)
-		viper.Set(commonflags.Account, acc.Address)
-		checkKeyError(t, wallPath, ErrInvalidAddress)
-	})
-
-	t.Run("WIF", func(t *testing.T) {
-		checkKeyError(t, wifKey.WIF(), ErrFs)
-	})
-
-	t.Run("NEP-2", func(t *testing.T) {
-		checkKeyError(t, nep2, ErrFs)
-	})
-
-	t.Run("raw key", func(t *testing.T) {
-		checkKey(t, keyPath, rawKey)
-	})
-
-	t.Run("generate", func(t *testing.T) {
-		viper.Set(commonflags.GenerateKey, true)
-		actual, err := getOrGenerate(testCmd)
-		require.NoError(t, err)
-		require.NotNil(t, actual)
-		for _, p := range []*keys.PrivateKey{nep2Key, rawKey, wifKey, acc1.PrivateKey(), acc2.PrivateKey()} {
-			require.NotEqual(t, p, actual, "expected new key to be generated")
-		}
-	})
-}
-
-func checkKeyError(t *testing.T, desc string, err error) {
-	viper.Set(commonflags.WalletPath, desc)
-	_, actualErr := getOrGenerate(testCmd)
-	require.ErrorIs(t, actualErr, err)
-}
-
-func checkKey(t *testing.T, desc string, expected *keys.PrivateKey) {
-	viper.Set(commonflags.WalletPath, desc)
-	actual, err := getOrGenerate(testCmd)
-	require.NoError(t, err)
-	require.Equal(t, &expected.PrivateKey, actual)
-}
diff --git a/cmd/frostfs-cli/internal/key/raw.go b/cmd/frostfs-cli/internal/key/raw.go
deleted file mode 100644
index 4b5591a86..000000000
--- a/cmd/frostfs-cli/internal/key/raw.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package key
-
-import (
-	"crypto/ecdsa"
-	"errors"
-	"fmt"
-	"os"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var errCantGenerateKey = errors.New("can't generate new private key")
-
-// Get returns the private key from a wallet or binary file.
-// Ideally we want to touch the file system only at the last step.
-// This function assumes that all flags were bound to viper in a `PersistentPreRun`.
-func Get(cmd *cobra.Command) *ecdsa.PrivateKey {
-	pk, err := get(cmd)
-	commonCmd.ExitOnErr(cmd, "can't fetch private key: %w", err)
-	return pk
-}
-
-func get(cmd *cobra.Command) (*ecdsa.PrivateKey, error) {
-	keyDesc := viper.GetString(commonflags.WalletPath)
-	data, err := os.ReadFile(keyDesc)
-	if err != nil {
-		return nil, fmt.Errorf("%w: %v", ErrFs, err)
-	}
-
-	priv, err := keys.NewPrivateKeyFromBytes(data)
-	if err != nil {
-		w, err := wallet.NewWalletFromFile(keyDesc)
-		if err == nil {
-			return FromWallet(cmd, w, viper.GetString(commonflags.Account))
-		}
-		return nil, fmt.Errorf("%w: %v", ErrInvalidKey, err)
-	}
-	return &priv.PrivateKey, nil
-}
-
-// GetOrGenerate is similar to Get but generates a new key if commonflags.GenerateKey is set.
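The resolution order implemented by get above is: read the --wallet value from viper, try it as a raw binary key, then fall back to a NEP-6 wallet (prompting for the account password). A sketch of a call site; the wallet path is a placeholder:

func exampleResolveKey(cmd *cobra.Command) *ecdsa.PrivateKey {
	// Normally a PersistentPreRun binds --wallet into viper; setting it
	// directly stands in for that here.
	viper.Set(commonflags.WalletPath, "./wallet.json")
	return key.GetOrGenerate(cmd) // returns a fresh key when --generate-key is set
}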
-func GetOrGenerate(cmd *cobra.Command) *ecdsa.PrivateKey { - pk, err := getOrGenerate(cmd) - commonCmd.ExitOnErr(cmd, "can't fetch private key: %w", err) - return pk -} - -func getOrGenerate(cmd *cobra.Command) (*ecdsa.PrivateKey, error) { - if viper.GetBool(commonflags.GenerateKey) { - priv, err := keys.NewPrivateKey() - if err != nil { - return nil, fmt.Errorf("%w: %v", errCantGenerateKey, err) - } - return &priv.PrivateKey, nil - } - return get(cmd) -} diff --git a/cmd/frostfs-cli/internal/key/wallet.go b/cmd/frostfs-cli/internal/key/wallet.go deleted file mode 100644 index 991c6633d..000000000 --- a/cmd/frostfs-cli/internal/key/wallet.go +++ /dev/null @@ -1,70 +0,0 @@ -package key - -import ( - "crypto/ecdsa" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// Key-related errors. -var ( - ErrFs = errors.New("unable to read file from given path") - ErrInvalidKey = errors.New("provided key is incorrect, only wallet or binary key supported") - ErrInvalidAddress = errors.New("--address option must be specified and valid") - ErrInvalidPassword = errors.New("invalid password for the encrypted key") -) - -// FromWallet returns private key of the wallet account. -func FromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) (*ecdsa.PrivateKey, error) { - var ( - addr util.Uint160 - err error - ) - - if addrStr == "" { - common.PrintVerbose(cmd, "Using default wallet address") - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - if err != nil { - common.PrintVerbose(cmd, "Can't parse address: %s", addrStr) - return nil, ErrInvalidAddress - } - } - - acc := w.GetAccount(addr) - if acc == nil { - common.PrintVerbose(cmd, "Can't find wallet account for %s", addrStr) - return nil, ErrInvalidAddress - } - - pass, err := getPassword() - if err != nil { - common.PrintVerbose(cmd, "Can't read password: %v", err) - return nil, ErrInvalidPassword - } - - if err := acc.Decrypt(pass, keys.NEP2ScryptParams()); err != nil { - common.PrintVerbose(cmd, "Can't decrypt account: %v", err) - return nil, ErrInvalidPassword - } - - return &acc.PrivateKey().PrivateKey, nil -} - -func getPassword() (string, error) { - // this check allows empty passwords - if viper.IsSet("password") { - return viper.GetString("password"), nil - } - - return input.ReadPassword("Enter password > ") -} diff --git a/cmd/frostfs-cli/main.go b/cmd/frostfs-cli/main.go deleted file mode 100644 index b2fa1dadd..000000000 --- a/cmd/frostfs-cli/main.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import cmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules" - -func main() { - cmd.Execute() -} diff --git a/cmd/frostfs-cli/modules/accounting/balance.go b/cmd/frostfs-cli/modules/accounting/balance.go deleted file mode 100644 index 1364b5e8e..000000000 --- a/cmd/frostfs-cli/modules/accounting/balance.go +++ /dev/null @@ -1,70 +0,0 @@ -package accounting - -import ( - "math/big" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - ownerFlag = "owner" -) - -var accountingBalanceCmd = &cobra.Command{ - Use: "balance", - Short: "Get internal balance of FrostFS account", - Long: `Get internal balance of FrostFS account`, - Run: func(cmd *cobra.Command, _ []string) { - var idUser user.ID - - pk := key.GetOrGenerate(cmd) - - balanceOwner, _ := cmd.Flags().GetString(ownerFlag) - if balanceOwner == "" { - user.IDFromKey(&idUser, pk.PublicKey) - } else { - commonCmd.ExitOnErr(cmd, "can't decode owner ID wallet address: %w", idUser.DecodeString(balanceOwner)) - } - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.BalanceOfPrm - prm.SetClient(cli) - prm.Account = idUser - - res, err := internalclient.BalanceOf(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - // print to stdout - prettyPrintDecimal(cmd, res.Balance()) - }, -} - -func initAccountingBalanceCmd() { - ff := accountingBalanceCmd.Flags() - - ff.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) - ff.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - ff.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage) - ff.String(ownerFlag, "", "owner of balance account (omit to use owner from private key)") -} - -func prettyPrintDecimal(cmd *cobra.Command, decimal accounting.Decimal) { - if viper.GetBool(commonflags.Verbose) { - cmd.Println("value:", decimal.Value()) - cmd.Println("precision:", decimal.Precision()) - } else { - amountF8 := precision.Convert(decimal.Precision(), 8, big.NewInt(decimal.Value())) - - cmd.Println(fixedn.ToString(amountF8, 8)) - } -} diff --git a/cmd/frostfs-cli/modules/accounting/root.go b/cmd/frostfs-cli/modules/accounting/root.go deleted file mode 100644 index afeca7626..000000000 --- a/cmd/frostfs-cli/modules/accounting/root.go +++ /dev/null @@ -1,27 +0,0 @@ -package accounting - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// Cmd represents the accounting command. 
-var Cmd = &cobra.Command{
-	Use:   "accounting",
-	Short: "Operations with accounts and balances",
-	Long:  `Operations with accounts and balances`,
-	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
-		flags := cmd.Flags()
-
-		_ = viper.BindPFlag(commonflags.WalletPath, flags.Lookup(commonflags.WalletPath))
-		_ = viper.BindPFlag(commonflags.Account, flags.Lookup(commonflags.Account))
-		_ = viper.BindPFlag(commonflags.RPC, flags.Lookup(commonflags.RPC))
-	},
-}
-
-func init() {
-	Cmd.AddCommand(accountingBalanceCmd)
-
-	initAccountingBalanceCmd()
-}
diff --git a/cmd/frostfs-cli/modules/acl/basic/print.go b/cmd/frostfs-cli/modules/acl/basic/print.go
deleted file mode 100644
index cb037bbbd..000000000
--- a/cmd/frostfs-cli/modules/acl/basic/print.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package basic
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
-	"github.com/spf13/cobra"
-)
-
-var printACLCmd = &cobra.Command{
-	Use:     "print",
-	Short:   "Pretty print basic ACL from the HEX representation",
-	Example: `frostfs-cli acl basic print 0x1C8C8CCC`,
-	Long: `Pretty print basic ACL from the HEX representation.
-A few roles have exclusive default access to a set of operations, even if a particular bit denies it.
-Containers have access to the operations of the data replication mechanism:
-	Get, Head, Put, Search, Hash.
-InnerRing members are allowed to perform data audit ops only:
-	Get, Head, Hash, Search.`,
-	Run:  printACL,
-	Args: cobra.ExactArgs(1),
-}
-
-func printACL(cmd *cobra.Command, args []string) {
-	var bacl acl.Basic
-	commonCmd.ExitOnErr(cmd, "unable to parse basic acl: %w", bacl.DecodeString(args[0]))
-	util.PrettyPrintTableBACL(cmd, &bacl)
-}
diff --git a/cmd/frostfs-cli/modules/acl/basic/root.go b/cmd/frostfs-cli/modules/acl/basic/root.go
deleted file mode 100644
index 042220028..000000000
--- a/cmd/frostfs-cli/modules/acl/basic/root.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package basic
-
-import (
-	"github.com/spf13/cobra"
-)
-
-var Cmd = &cobra.Command{
-	Use:   "basic",
-	Short: "Operations with Basic Access Control Lists",
-}
-
-func init() {
-	Cmd.AddCommand(printACLCmd)
-}
diff --git a/cmd/frostfs-cli/modules/acl/extended/create.go b/cmd/frostfs-cli/modules/acl/extended/create.go
deleted file mode 100644
index 59dfabba2..000000000
--- a/cmd/frostfs-cli/modules/acl/extended/create.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package extended
-
-import (
-	"bytes"
-	"encoding/json"
-	"os"
-	"strings"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/spf13/cobra"
-)
-
-var createCmd = &cobra.Command{
-	Use:   "create",
-	Short: "Create extended ACL from the text representation",
-	Long: `Create extended ACL from the text representation.
-
-A rule consists of these blocks: <action> <operation> [<filter1> ...] [<target1> ...]
-
-Action is 'allow' or 'deny'.
-
-Operation is an object service verb: 'get', 'head', 'put', 'search', 'delete', 'getrange', or 'getrangehash'.
-
-Filter consists of <typ>:<key><match><value>
-  Typ is 'obj' for object applied filter or 'req' for request applied filter.
-  Key is a valid unicode string corresponding to object or request header key.
-    Well-known system object headers start with '$Object:' prefix.
-    User defined headers start without prefix.
-    Read more about filter keys at git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecordfilter
-  Match is '=' for matching and '!=' for non-matching filter.
-  Value is a valid unicode string corresponding to object or request header value.
-
-Target is
-  'user' for container owner,
-  'system' for Storage nodes in container and Inner Ring nodes,
-  'others' for all other request senders,
-  'pubkey:<key1>,<key2>,...' for exact request sender, where <key> is a hex-encoded 33-byte public key.
-
-When both '--rule' and '--file' arguments are used, '--rule' records will be placed higher in the resulting extended ACL table.
-`,
-	Example: `frostfs-cli acl extended create --cid EutHBsdT1YCzHxjCfQHnLPL1vFrkSyLSio4vkphfnEk -f rules.txt --out table.json
-frostfs-cli acl extended create --cid EutHBsdT1YCzHxjCfQHnLPL1vFrkSyLSio4vkphfnEk -r 'allow get obj:Key=Value others' -r 'deny put others'`,
-	Run: createEACL,
-}
-
-func init() {
-	createCmd.Flags().StringArrayP("rule", "r", nil, "Extended ACL table record to apply")
-	createCmd.Flags().StringP("file", "f", "", "Read list of extended ACL table records from text file")
-	createCmd.Flags().StringP("out", "o", "", "Save JSON formatted extended ACL table in file")
-	createCmd.Flags().StringP(commonflags.CIDFlag, "", "", commonflags.CIDFlagUsage)
-
-	_ = cobra.MarkFlagFilename(createCmd.Flags(), "file")
-	_ = cobra.MarkFlagFilename(createCmd.Flags(), "out")
-}
-
-func createEACL(cmd *cobra.Command, _ []string) {
-	rules, _ := cmd.Flags().GetStringArray("rule")
-	fileArg, _ := cmd.Flags().GetString("file")
-	outArg, _ := cmd.Flags().GetString("out")
-	cidArg, _ := cmd.Flags().GetString(commonflags.CIDFlag)
-
-	var containerID cid.ID
-	if cidArg != "" {
-		if err := containerID.DecodeString(cidArg); err != nil {
-			cmd.PrintErrf("invalid container ID: %v\n", err)
-			os.Exit(1)
-		}
-	}
-
-	rulesFile, err := getRulesFromFile(fileArg)
-	if err != nil {
-		cmd.PrintErrf("can't read rules from file: %v\n", err)
-		os.Exit(1)
-	}
-
-	rules = append(rules, rulesFile...)
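// Records given via --rule end up ahead of those read from --file here,
// matching the precedence promised in the command's help text above.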
-	if len(rules) == 0 {
-		cmd.PrintErrln("no extended ACL rules have been provided")
-		os.Exit(1)
-	}
-
-	tb := eacl.NewTable()
-	commonCmd.ExitOnErr(cmd, "unable to parse provided rules: %w", util.ParseEACLRules(tb, rules))
-
-	tb.SetCID(containerID)
-
-	data, err := tb.MarshalJSON()
-	if err != nil {
-		cmd.PrintErrln(err)
-		os.Exit(1)
-	}
-
-	buf := new(bytes.Buffer)
-	err = json.Indent(buf, data, "", " ")
-	if err != nil {
-		cmd.PrintErrln(err)
-		os.Exit(1)
-	}
-
-	if len(outArg) == 0 {
-		cmd.Println(buf)
-		return
-	}
-
-	err = os.WriteFile(outArg, buf.Bytes(), 0o644)
-	if err != nil {
-		cmd.PrintErrln(err)
-		os.Exit(1)
-	}
-}
-
-func getRulesFromFile(filename string) ([]string, error) {
-	if len(filename) == 0 {
-		return nil, nil
-	}
-
-	data, err := os.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	return strings.Split(strings.TrimSpace(string(data)), "\n"), nil
-}
diff --git a/cmd/frostfs-cli/modules/acl/extended/create_test.go b/cmd/frostfs-cli/modules/acl/extended/create_test.go
deleted file mode 100644
index 71f41e589..000000000
--- a/cmd/frostfs-cli/modules/acl/extended/create_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package extended
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-	"github.com/stretchr/testify/require"
-)
-
-func TestParseTable(t *testing.T) {
-	tests := [...]struct {
-		name       string // test name
-		rule       string // input extended ACL rule
-		jsonRecord string // produced record after successful parsing
-	}{
-		{
-			name:       "valid rule with multiple filters",
-			rule:       "deny get obj:a=b req:c=d others",
-			jsonRecord: `{"operation":"GET","action":"DENY","filters":[{"headerType":"OBJECT","matchType":"STRING_EQUAL","key":"a","value":"b"},{"headerType":"REQUEST","matchType":"STRING_EQUAL","key":"c","value":"d"}],"targets":[{"role":"OTHERS","keys":[]}]}`,
-		},
-		{
-			name:       "valid rule without filters",
-			rule:       "allow put user",
-			jsonRecord: `{"operation":"PUT","action":"ALLOW","filters":[],"targets":[{"role":"USER","keys":[]}]}`,
-		},
-		{
-			name:       "valid rule with public key",
-			rule:       "deny getrange pubkey:036410abb260bbbda89f61c0cad65a4fa15ac5cb83b3c3abf8aee403856fcf65ed",
-			jsonRecord: `{"operation":"GETRANGE","action":"DENY","filters":[],"targets":[{"role":"ROLE_UNSPECIFIED","keys":["A2QQq7Jgu72on2HAytZaT6FaxcuDs8Or+K7kA4Vvz2Xt"]}]}`,
-		},
-		{
-			name: "missing action",
-			rule: "get obj:a=b others",
-		},
-		{
-			name: "invalid action",
-			rule: "permit get obj:a=b others",
-		},
-		{
-			name: "missing op",
-			rule: "deny obj:a=b others",
-		},
-		{
-			name: "invalid op action",
-			rule: "deny look obj:a=b others",
-		},
-		{
-			name: "invalid filter type",
-			rule: "deny get invalid:a=b others",
-		},
-		{
-			name: "invalid target group",
-			rule: "deny get obj:a=b helpers",
-		},
-		{
-			name: "invalid public key",
-			rule: "deny get obj:a=b pubkey:0123",
-		},
-	}
-
-	eaclTable := eacl.NewTable()
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			err := util.ParseEACLRule(eaclTable, test.rule)
-			ok := len(test.jsonRecord) > 0
-			require.Equal(t, ok, err == nil, err)
-			if ok {
-				expectedRecord := eacl.NewRecord()
-				err = expectedRecord.UnmarshalJSON([]byte(test.jsonRecord))
-				require.NoError(t, err)
-
-				actualRecord := eaclTable.Records()[len(eaclTable.Records())-1]
-
-				equalRecords(t, expectedRecord, &actualRecord)
-			}
-		})
-	}
-}
-
-func equalRecords(t *testing.T, r1, r2 *eacl.Record) {
-	d1, err := r1.Marshal()
-	require.NoError(t, err)
-
-	d2, err :=
r2.Marshal() - require.NoError(t, err) - - require.Equal(t, d1, d2) -} diff --git a/cmd/frostfs-cli/modules/acl/extended/print.go b/cmd/frostfs-cli/modules/acl/extended/print.go deleted file mode 100644 index 3e46a70ba..000000000 --- a/cmd/frostfs-cli/modules/acl/extended/print.go +++ /dev/null @@ -1,38 +0,0 @@ -package extended - -import ( - "os" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "github.com/spf13/cobra" -) - -var printEACLCmd = &cobra.Command{ - Use: "print", - Short: "Pretty print extended ACL from the file(in text or json format) or for given container.", - Run: printEACL, -} - -func init() { - flags := printEACLCmd.Flags() - flags.StringP("file", "f", "", - "Read list of extended ACL table records from text or json file") - _ = printEACLCmd.MarkFlagRequired("file") -} - -func printEACL(cmd *cobra.Command, _ []string) { - file, _ := cmd.Flags().GetString("file") - eaclTable := new(eacl.Table) - data, err := os.ReadFile(file) - commonCmd.ExitOnErr(cmd, "can't read file with EACL: %w", err) - if strings.HasSuffix(file, ".json") { - commonCmd.ExitOnErr(cmd, "unable to parse json: %w", eaclTable.UnmarshalJSON(data)) - } else { - rules := strings.Split(strings.TrimSpace(string(data)), "\n") - commonCmd.ExitOnErr(cmd, "can't parse file with EACL: %w", util.ParseEACLRules(eaclTable, rules)) - } - util.PrettyPrintTableEACL(cmd, eaclTable) -} diff --git a/cmd/frostfs-cli/modules/acl/extended/root.go b/cmd/frostfs-cli/modules/acl/extended/root.go deleted file mode 100644 index 8ec2915d4..000000000 --- a/cmd/frostfs-cli/modules/acl/extended/root.go +++ /dev/null @@ -1,15 +0,0 @@ -package extended - -import ( - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "extended", - Short: "Operations with Extended Access Control Lists", -} - -func init() { - Cmd.AddCommand(createCmd) - Cmd.AddCommand(printEACLCmd) -} diff --git a/cmd/frostfs-cli/modules/acl/root.go b/cmd/frostfs-cli/modules/acl/root.go deleted file mode 100644 index 3f3189144..000000000 --- a/cmd/frostfs-cli/modules/acl/root.go +++ /dev/null @@ -1,17 +0,0 @@ -package acl - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl/basic" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl/extended" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "acl", - Short: "Operations with Access Control Lists", -} - -func init() { - Cmd.AddCommand(extended.Cmd) - Cmd.AddCommand(basic.Cmd) -} diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go deleted file mode 100644 index f4039283f..000000000 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ /dev/null @@ -1,86 +0,0 @@ -package apemanager - -import ( - "fmt" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - 
"github.com/spf13/cobra" -) - -var addCmd = &cobra.Command{ - Use: "add", - Short: "Add rule chain for a target", - Run: add, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) { - t := apeCmd.ParseTarget(cmd) - - ct.Name = t.Name - - switch t.Type { - case engine.Namespace: - ct.TargetType = apeSDK.TargetTypeNamespace - case engine.Container: - ct.TargetType = apeSDK.TargetTypeContainer - case engine.User: - ct.TargetType = apeSDK.TargetTypeUser - case engine.Group: - ct.TargetType = apeSDK.TargetTypeGroup - default: - commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type)) - } - return ct -} - -func parseChain(cmd *cobra.Command) apeSDK.Chain { - c := apeCmd.ParseChain(cmd) - serialized := c.Bytes() - return apeSDK.Chain{ - Raw: serialized, - } -} - -func add(cmd *cobra.Command, _ []string) { - c := parseChain(cmd) - - target := parseTarget(cmd) - - key := key.Get(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - res, err := cli.APEManagerAddChain(cmd.Context(), client_sdk.PrmAPEManagerAddChain{ - ChainTarget: target, - Chain: c, - }) - - commonCmd.ExitOnErr(cmd, "add chain error: %w", err) - - cmd.Println("Rule has been added.") - cmd.Println("Chain ID: ", string(res.ChainID)) -} - -func initAddCmd() { - commonflags.Init(addCmd) - - ff := addCmd.Flags() - ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc) - ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc) - ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) - ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) - ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) - _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) - ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) - - addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) -} diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go deleted file mode 100644 index b07ecc52f..000000000 --- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go +++ /dev/null @@ -1,49 +0,0 @@ -package apemanager - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/spf13/cobra" -) - -var listCmd = &cobra.Command{ - Use: "list", - Short: "List rule chains defined on target", - Run: list, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func list(cmd *cobra.Command, _ []string) { - target := parseTarget(cmd) - - key := key.Get(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - resp, err := cli.APEManagerListChains(cmd.Context(), - client_sdk.PrmAPEManagerListChains{ - ChainTarget: target, - }) - commonCmd.ExitOnErr(cmd, "list chains call error: %w", err) - - for _, respChain := range resp.Chains { - var chain apechain.Chain - commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw)) - 
apeCmd.PrintHumanReadableAPEChain(cmd, &chain) - } -} - -func initListCmd() { - commonflags.Init(listCmd) - - ff := listCmd.Flags() - ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) - ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) - _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) -} diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go deleted file mode 100644 index 136ca81c3..000000000 --- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go +++ /dev/null @@ -1,51 +0,0 @@ -package apemanager - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "github.com/spf13/cobra" -) - -var removeCmd = &cobra.Command{ - Use: "remove", - Short: "Remove rule chain for a target", - Run: remove, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func remove(cmd *cobra.Command, _ []string) { - target := parseTarget(cmd) - - key := key.Get(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - chainID := apeCmd.ParseChainID(cmd) - chainIDRaw := []byte(chainID) - - _, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{ - ChainTarget: target, - ChainID: chainIDRaw, - }) - - commonCmd.ExitOnErr(cmd, "remove chain error: %w", err) - - cmd.Println("\nRule has been removed.") -} - -func initRemoveCmd() { - commonflags.Init(removeCmd) - - ff := removeCmd.Flags() - ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) - ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) - _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) - ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) - _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag) - ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) -} diff --git a/cmd/frostfs-cli/modules/ape_manager/root.go b/cmd/frostfs-cli/modules/ape_manager/root.go deleted file mode 100644 index 7b4f92921..000000000 --- a/cmd/frostfs-cli/modules/ape_manager/root.go +++ /dev/null @@ -1,21 +0,0 @@ -package apemanager - -import ( - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "ape-manager", - Short: "Operations with APE manager", - Long: `Operations with APE manager`, -} - -func init() { - Cmd.AddCommand(addCmd) - Cmd.AddCommand(removeCmd) - Cmd.AddCommand(listCmd) - - initAddCmd() - initRemoveCmd() - initListCmd() -} diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go deleted file mode 100644 index 0927788ba..000000000 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ /dev/null @@ -1,155 +0,0 @@ -package bearer - -import ( - "context" - "encoding/json" - "fmt" - "os" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - 
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - eaclFlag = "eacl" - apeFlag = "ape" - issuedAtFlag = "issued-at" - notValidBeforeFlag = "not-valid-before" - ownerFlag = "owner" - outFlag = "out" - jsonFlag = commonflags.JSON - impersonateFlag = "impersonate" -) - -var createCmd = &cobra.Command{ - Use: "create", - Short: "Create bearer token", - Long: `Create bearer token. - -All epoch flags can be specified relative to the current epoch with the +n syntax. -In this case --` + commonflags.RPC + ` flag should be specified and the epoch in bearer token -is set to current epoch + n. -`, - Run: createToken, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - ff := cmd.Flags() - - _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) - _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) - }, -} - -func init() { - createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate and --ape flag)") - createCmd.Flags().StringP(apeFlag, "a", "", "Path to the JSON-encoded APE override (mutually exclusive with --impersonate and --eacl flag)") - createCmd.Flags().StringP(issuedAtFlag, "i", "+0", "Epoch to issue token at") - createCmd.Flags().StringP(notValidBeforeFlag, "n", "+0", "Not valid before epoch") - createCmd.Flags().StringP(commonflags.ExpireAt, "x", "", "The last active epoch for the token") - createCmd.Flags().StringP(ownerFlag, "o", "", "Token owner") - createCmd.Flags().String(outFlag, "", "File to write token to") - createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON") - createCmd.Flags().Bool(impersonateFlag, false, "Mark token as impersonate to consider the token signer as the request owner (mutually exclusive with --eacl flag)") - createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage) - createCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) - createCmd.Flags().StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - - createCmd.MarkFlagsMutuallyExclusive(eaclFlag, apeFlag, impersonateFlag) - - _ = cobra.MarkFlagFilename(createCmd.Flags(), eaclFlag) - _ = cobra.MarkFlagFilename(createCmd.Flags(), apeFlag) - - _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.ExpireAt) - _ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag) -} - -func createToken(cmd *cobra.Command, _ []string) { - iat, iatRelative, err := common.ParseEpoch(cmd, issuedAtFlag) - commonCmd.ExitOnErr(cmd, "can't parse --"+issuedAtFlag+" flag: %w", err) - - exp, expRelative, err := common.ParseEpoch(cmd, commonflags.ExpireAt) - commonCmd.ExitOnErr(cmd, "can't parse --"+commonflags.ExpireAt+" flag: %w", err) - - nvb, nvbRelative, err := common.ParseEpoch(cmd, notValidBeforeFlag) - commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) - - if iatRelative || expRelative || nvbRelative { - endpoint := viper.GetString(commonflags.RPC) - if len(endpoint) == 0 { - commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() - - 
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
-		commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", err)
-
-		if iatRelative {
-			iat += currEpoch
-		}
-		if expRelative {
-			exp += currEpoch
-		}
-		if nvbRelative {
-			nvb += currEpoch
-		}
-	}
-	if exp < nvb {
-		commonCmd.ExitOnErr(cmd, "",
-			fmt.Errorf("expiration epoch is less than not-valid-before epoch: %d < %d", exp, nvb))
-	}
-
-	var b bearer.Token
-	b.SetExp(exp)
-	b.SetNbf(nvb)
-	b.SetIat(iat)
-
-	if ownerStr, _ := cmd.Flags().GetString(ownerFlag); ownerStr != "" {
-		var ownerID user.ID
-		commonCmd.ExitOnErr(cmd, "can't parse recipient: %w", ownerID.DecodeString(ownerStr))
-		b.ForUser(ownerID)
-	}
-
-	impersonate, _ := cmd.Flags().GetBool(impersonateFlag)
-	b.SetImpersonate(impersonate)
-
-	eaclPath, _ := cmd.Flags().GetString(eaclFlag)
-	if eaclPath != "" {
-		table := eaclSDK.NewTable()
-		raw, err := os.ReadFile(eaclPath)
-		commonCmd.ExitOnErr(cmd, "can't read extended ACL file: %w", err)
-		commonCmd.ExitOnErr(cmd, "can't parse extended ACL: %w", json.Unmarshal(raw, table))
-		b.SetEACLTable(*table)
-	}
-
-	apePath, _ := cmd.Flags().GetString(apeFlag)
-	if apePath != "" {
-		var apeOverride bearer.APEOverride
-		raw, err := os.ReadFile(apePath)
-		commonCmd.ExitOnErr(cmd, "can't read APE rules: %w", err)
-		commonCmd.ExitOnErr(cmd, "can't parse APE rules: %w", json.Unmarshal(raw, &apeOverride))
-		b.SetAPEOverride(apeOverride)
-	}
-	var data []byte
-
-	toJSON, _ := cmd.Flags().GetBool(jsonFlag)
-	if toJSON {
-		data, err = json.Marshal(b)
-		commonCmd.ExitOnErr(cmd, "can't marshal token to JSON: %w", err)
-	} else {
-		data = b.Marshal()
-	}
-
-	out, _ := cmd.Flags().GetString(outFlag)
-	err = os.WriteFile(out, data, 0o644)
-	commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err)
-}
diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go
deleted file mode 100644
index 9632061f1..000000000
--- a/cmd/frostfs-cli/modules/bearer/generate_override.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package bearer
-
-import (
-	"fmt"
-	"os"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
-	apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/spf13/cobra"
-)
-
-const (
-	outputFlag = "output"
-)
-
-var generateAPEOverrideCmd = &cobra.Command{
-	Use:   "generate-ape-override",
-	Short: "Generate APE override.",
-	Long: `Generate APE override by target and APE chains. Utility command.
-
-Generated APE override can be dumped to a file in JSON format that is passed to
-"create" command.
-`, - Run: genereateAPEOverride, -} - -func genereateAPEOverride(cmd *cobra.Command, _ []string) { - c := apeCmd.ParseChain(cmd) - - targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag) - var cid cidSDK.ID - commonCmd.ExitOnErr(cmd, "invalid cid format: %w", cid.DecodeString(targetCID)) - - override := &bearer.APEOverride{ - Target: apeSDK.ChainTarget{ - TargetType: apeSDK.TargetTypeContainer, - Name: targetCID, - }, - Chains: []apeSDK.Chain{ - { - Raw: c.Bytes(), - }, - }, - } - - overrideMarshalled, err := override.MarshalJSON() - commonCmd.ExitOnErr(cmd, "failed to marshal APE override: %w", err) - - outputPath, _ := cmd.Flags().GetString(outputFlag) - if outputPath != "" { - err := os.WriteFile(outputPath, overrideMarshalled, 0o644) - commonCmd.ExitOnErr(cmd, "dump error: %w", err) - } else { - fmt.Print("\n") - fmt.Println(string(overrideMarshalled)) - } -} - -func init() { - ff := generateAPEOverrideCmd.Flags() - - ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.") - _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag) - - ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") - ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") - ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") - - ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override") - _ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag) -} diff --git a/cmd/frostfs-cli/modules/bearer/root.go b/cmd/frostfs-cli/modules/bearer/root.go deleted file mode 100644 index fa6aef6fb..000000000 --- a/cmd/frostfs-cli/modules/bearer/root.go +++ /dev/null @@ -1,15 +0,0 @@ -package bearer - -import ( - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "bearer", - Short: "Operations with bearer token", -} - -func init() { - Cmd.AddCommand(createCmd) - Cmd.AddCommand(generateAPEOverrideCmd) -} diff --git a/cmd/frostfs-cli/modules/completion.go b/cmd/frostfs-cli/modules/completion.go deleted file mode 100644 index 618317e0b..000000000 --- a/cmd/frostfs-cli/modules/completion.go +++ /dev/null @@ -1,9 +0,0 @@ -package cmd - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" -) - -func init() { - rootCmd.AddCommand(autocomplete.Command("frostfs-cli")) -} diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go deleted file mode 100644 index 30f995180..000000000 --- a/cmd/frostfs-cli/modules/container/create.go +++ /dev/null @@ -1,226 +0,0 @@ -package container - -import ( - "errors" - "fmt" - "os" - "strings" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" -) - -var ( - containerPolicy string - containerAttributes []string - containerAwait bool - containerName string - containerNnsName 
string - containerNnsZone string - containerNoTimestamp bool - force bool -) - -var createContainerCmd = &cobra.Command{ - Use: "create", - Short: "Create new container", - Long: `Create new container and register it in FrostFS. -It will be stored in the sidechain when the Inner Ring accepts it.`, - Run: func(cmd *cobra.Command, _ []string) { - placementPolicy, err := parseContainerPolicy(cmd, containerPolicy) - commonCmd.ExitOnErr(cmd, "", err) - - key := key.Get(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - if !force { - var prm internalclient.NetMapSnapshotPrm - prm.SetClient(cli) - - resmap, err := internalclient.NetMapSnapshot(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot to validate container placement, "+ - "use --force option to skip this check: %w", err) - - nodesByRep, err := resmap.NetMap().ContainerNodes(*placementPolicy, nil) - commonCmd.ExitOnErr(cmd, "could not build container nodes based on given placement policy, "+ - "use --force option to skip this check: %w", err) - - for i, nodes := range nodesByRep { - if repNum := placementPolicy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 { - if repNum > uint32(len(nodes)) { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf( - "the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+ - "use --force option to skip this check", - len(nodes), - repNum, - )) - } - } else if ecParts := placementPolicy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 { - if ecParts > uint32(len(nodes)) { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf( - "the number of nodes '%d' in selector is not enough for EC placement '%d.%d', "+ - "use --force option to skip this check", - len(nodes), - placementPolicy.ReplicaDescriptor(i).GetECDataCount(), - placementPolicy.ReplicaDescriptor(i).GetECParityCount(), - )) - } - } else { - commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set")) - } - } - } - - var cnr container.Container - cnr.Init() - - err = parseAttributes(&cnr, containerAttributes) - commonCmd.ExitOnErr(cmd, "", err) - - tok := getSession(cmd) - - if tok != nil { - issuer := tok.Issuer() - cnr.SetOwner(issuer) - } else { - var idOwner user.ID - user.IDFromKey(&idOwner, key.PublicKey) - - cnr.SetOwner(idOwner) - } - - cnr.SetPlacementPolicy(*placementPolicy) - - var syncContainerPrm internalclient.SyncContainerPrm - syncContainerPrm.SetClient(cli) - syncContainerPrm.SetContainer(&cnr) - - _, err = internalclient.SyncContainerSettings(cmd.Context(), syncContainerPrm) - commonCmd.ExitOnErr(cmd, "syncing container's settings rpc error: %w", err) - - putPrm := internalclient.PutContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerPut{ - Container: &cnr, - Session: tok, - }, - } - - res, err := internalclient.PutContainer(cmd.Context(), putPrm) - commonCmd.ExitOnErr(cmd, "put container rpc error: %w", err) - - id := res.ID() - - cmd.Println("CID:", id) - - if containerAwait { - cmd.Println("awaiting...") - - getPrm := internalclient.GetContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerGet{ - ContainerID: &id, - }, - } - - for range awaitTimeout { - time.Sleep(1 * time.Second) - - _, err := internalclient.GetContainer(cmd.Context(), getPrm) - if err == nil { - cmd.Println("container has been persisted on sidechain") - return - } - } - - commonCmd.ExitOnErr(cmd, "", errCreateTimeout) - } - }, -} - -func initContainerCreateCmd() { - flags := createContainerCmd.Flags() - - // Init common flags -
flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage) - flags.Bool(commonflags.TracingFlag, false, commonflags.TracingFlagUsage) - flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage) - flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) - flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to a file with it") - flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma-separated pairs of container attributes in the form Key1=Value1,Key2=Value2") - flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted") - flags.StringVar(&containerName, "name", "", "Container name attribute") - flags.StringVar(&containerNnsName, "nns-name", "", "Container nns name attribute") - flags.StringVar(&containerNnsZone, "nns-zone", "", "Container nns zone attribute") - flags.BoolVar(&containerNoTimestamp, "disable-timestamp", false, "Disable timestamp container attribute") - flags.BoolVarP(&force, commonflags.ForceFlag, commonflags.ForceFlagShorthand, false, - "Skip placement validity check") -} - -func parseContainerPolicy(cmd *cobra.Command, policyString string) (*netmap.PlacementPolicy, error) { - _, err := os.Stat(policyString) // check if `policyString` is a path to file with placement policy - if err == nil { - common.PrintVerbose(cmd, "Reading placement policy from file: %s", policyString) - - data, err := os.ReadFile(policyString) - if err != nil { - return nil, fmt.Errorf("can't read file with placement policy: %w", err) - } - - policyString = string(data) - } - - var result netmap.PlacementPolicy - - err = result.DecodeString(policyString) - if err == nil { - common.PrintVerbose(cmd, "Parsed QL encoded policy") - return &result, nil - } - - if err := result.UnmarshalJSON([]byte(policyString)); err == nil { - common.PrintVerbose(cmd, "Parsed JSON encoded policy") - return &result, nil - } - - return nil, fmt.Errorf("can't parse placement policy: %w", err) -} - -func parseAttributes(dst *container.Container, attributes []string) error { - for i := range attributes { - k, v, found := strings.Cut(attributes[i], attributeDelimiter) - if !found { - return errors.New("invalid container attribute") - } - - dst.SetAttribute(k, v) - } - - if !containerNoTimestamp { - container.SetCreationTime(dst, time.Now()) - } - - if containerName != "" { - container.SetName(dst, containerName) - } - - if containerNnsName != "" { - dst.SetAttribute(containerApi.SysAttributeName, containerNnsName) - } - if containerNnsZone != "" { - dst.SetAttribute(containerApi.SysAttributeZone, containerNnsZone) - } - - return nil -} diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go deleted file mode 100644 index c20188884..000000000 --- a/cmd/frostfs-cli/modules/container/delete.go +++ /dev/null @@ -1,139 +0,0 @@ -package container - -import ( - "fmt" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" -
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" -) - -var deleteContainerCmd = &cobra.Command{ - Use: "delete", - Short: "Delete existing container", - Long: `Delete existing container. -Only owner of the container has a permission to remove container.`, - Run: func(cmd *cobra.Command, _ []string) { - id := parseContainerID(cmd) - - tok := getSession(cmd) - - pk := key.Get(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - if force, _ := cmd.Flags().GetBool(commonflags.ForceFlag); !force { - common.PrintVerbose(cmd, "Reading the container to check ownership...") - - getPrm := internalclient.GetContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerGet{ - ContainerID: &id, - }, - } - - resGet, err := internalclient.GetContainer(cmd.Context(), getPrm) - commonCmd.ExitOnErr(cmd, "can't get the container: %w", err) - - owner := resGet.Container().Owner() - - if tok != nil { - common.PrintVerbose(cmd, "Checking session issuer...") - - if !tok.Issuer().Equals(owner) { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("session issuer differs with the container owner: expected %s, has %s", owner, tok.Issuer())) - } - } else { - common.PrintVerbose(cmd, "Checking provided account...") - - var acc user.ID - user.IDFromKey(&acc, pk.PublicKey) - - if !acc.Equals(owner) { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("provided account differs with the container owner: expected %s, has %s", owner, acc)) - } - } - - common.PrintVerbose(cmd, "Account matches the container owner.") - - if tok != nil { - common.PrintVerbose(cmd, "Skip searching for LOCK objects - session provided.") - } else { - fs := objectSDK.NewSearchFilters() - fs.AddTypeFilter(objectSDK.MatchStringEqual, objectSDK.TypeLock) - - var searchPrm internalclient.SearchObjectsPrm - searchPrm.SetClient(cli) - searchPrm.SetContainerID(id) - searchPrm.SetFilters(fs) - searchPrm.SetTTL(2) - - common.PrintVerbose(cmd, "Searching for LOCK objects...") - - res, err := internalclient.SearchObjects(cmd.Context(), searchPrm) - commonCmd.ExitOnErr(cmd, "can't search for LOCK objects: %w", err) - - if len(res.IDList()) != 0 { - commonCmd.ExitOnErr(cmd, "", - fmt.Errorf("container wasn't removed because LOCK objects were found, "+ - "use --%s flag to remove anyway", commonflags.ForceFlag)) - } - } - } - - delPrm := internalclient.DeleteContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerDelete{ - ContainerID: &id, - Session: tok, - }, - } - - _, err := internalclient.DeleteContainer(cmd.Context(), delPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Println("container delete method invoked") - - if containerAwait { - cmd.Println("awaiting...") - - getPrm := internalclient.GetContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerGet{ - ContainerID: &id, - }, - } - - for range awaitTimeout { - time.Sleep(1 * time.Second) - - _, err := internalclient.GetContainer(cmd.Context(), getPrm) - if err != nil { - cmd.Println("container has been removed:", containerID) - return - } - } - - commonCmd.ExitOnErr(cmd, "", errDeleteTimeout) - } - }, -} - -func initContainerDeleteCmd() { - flags := deleteContainerCmd.Flags() - - flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, 
commonflags.WalletPathDefault, commonflags.WalletPathUsage) - flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage) - flags.Bool(commonflags.TracingFlag, false, commonflags.TracingFlagUsage) - - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.BoolVar(&containerAwait, "await", false, "Block execution until container is removed") - flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false, "Skip validation checks (ownership, presence of LOCK objects)") -} diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go deleted file mode 100644 index fac6eb2cd..000000000 --- a/cmd/frostfs-cli/modules/container/get.go +++ /dev/null @@ -1,164 +0,0 @@ -package container - -import ( - "crypto/ecdsa" - "os" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -const ( - fromFlag = "from" - fromFlagUsage = "Path to file with encoded container" -) - -var ( - containerID string - containerPathFrom string - containerPathTo string - containerJSON bool -) - -var getContainerInfoCmd = &cobra.Command{ - Use: "get", - Short: "Get container field info", - Long: `Get container field info`, - Run: func(cmd *cobra.Command, _ []string) { - cnr, _ := getContainer(cmd) - - prettyPrintContainer(cmd, cnr, containerJSON) - - if containerPathTo != "" { - var ( - data []byte - err error - ) - - if containerJSON { - data, err = cnr.MarshalJSON() - commonCmd.ExitOnErr(cmd, "can't JSON encode container: %w", err) - } else { - data = cnr.Marshal() - } - - err = os.WriteFile(containerPathTo, data, 0o644) - commonCmd.ExitOnErr(cmd, "can't write container to file: %w", err) - } - }, -} - -func initContainerInfoCmd() { - commonflags.Init(getContainerInfoCmd) - - flags := getContainerInfoCmd.Flags() - - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container") - flags.StringVar(&containerPathFrom, fromFlag, "", fromFlagUsage) - flags.BoolVar(&containerJSON, commonflags.JSON, false, "Print or dump container in JSON format") -} - -type stringWriter cobra.Command - -func (x *stringWriter) WriteString(s string) (n int, err error) { - (*cobra.Command)(x).Print(s) - return len(s), nil -} - -func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncoding bool) { - if jsonEncoding { - common.PrettyPrintJSON(cmd, cnr, "container") - return - } - - var id cid.ID - container.CalculateID(&id, cnr) - cmd.Println("CID:", id) - - cmd.Println("owner ID:", cnr.Owner()) - - basicACL := cnr.BasicACL() - prettyPrintBasicACL(cmd, basicACL) - - cmd.Println("created:", 
container.CreatedAt(cnr)) - - cmd.Println("attributes:") - for key, val := range cnr.Attributes() { - cmd.Printf("\t%s=%s\n", key, val) - } - - cmd.Println("placement policy:") - commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) - cmd.Println() -} - -func prettyPrintBasicACL(cmd *cobra.Command, basicACL acl.Basic) { - cmd.Printf("basic ACL: %s", basicACL.EncodeToString()) - - var prettyName string - - switch basicACL { - case acl.Private: - prettyName = acl.NamePrivate - case acl.PrivateExtended: - prettyName = acl.NamePrivateExtended - case acl.PublicRO: - prettyName = acl.NamePublicRO - case acl.PublicROExtended: - prettyName = acl.NamePublicROExtended - case acl.PublicRW: - prettyName = acl.NamePublicRW - case acl.PublicRWExtended: - prettyName = acl.NamePublicRWExtended - case acl.PublicAppend: - prettyName = acl.NamePublicAppend - case acl.PublicAppendExtended: - prettyName = acl.NamePublicAppendExtended - } - - if prettyName != "" { - cmd.Printf(" (%s)", prettyName) - } - - cmd.Println() - util.PrettyPrintTableBACL(cmd, &basicACL) -} - -func getContainer(cmd *cobra.Command) (container.Container, *ecdsa.PrivateKey) { - var cnr container.Container - var pk *ecdsa.PrivateKey - if containerPathFrom != "" { - data, err := os.ReadFile(containerPathFrom) - commonCmd.ExitOnErr(cmd, "can't read file: %w", err) - - err = cnr.Unmarshal(data) - commonCmd.ExitOnErr(cmd, "can't unmarshal container: %w", err) - } else { - id := parseContainerID(cmd) - pk = key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - prm := internalclient.GetContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerGet{ - ContainerID: &id, - }, - } - - res, err := internalclient.GetContainer(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cnr = res.Container() - } - return cnr, pk -} diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go deleted file mode 100644 index e4a023d91..000000000 --- a/cmd/frostfs-cli/modules/container/list.go +++ /dev/null @@ -1,126 +0,0 @@ -package container - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// flags of list command. -const ( - flagListPrintAttr = "with-attr" - flagListContainerOwner = "owner" - flagListName = "name" - - generateKeyContainerUsage = commonflags.GenerateKeyUsage + ", should be used with --owner flag" -) - -// flag vars of list command. 
-var ( - flagVarListPrintAttr bool - flagVarListContainerOwner string - flagVarListName string -) - -var listContainersCmd = &cobra.Command{ - Use: "list", - Short: "List all created containers", - Long: "List all created containers", - Run: func(cmd *cobra.Command, _ []string) { - var idUser user.ID - - generateKey, _ := cmd.Flags().GetBool(commonflags.GenerateKey) - if flagVarListContainerOwner == "" && generateKey { - cmd.PrintErrln("WARN: using -g without --owner - output will be empty") - } - - key := key.GetOrGenerate(cmd) - - if flagVarListContainerOwner == "" { - user.IDFromKey(&idUser, key.PublicKey) - } else { - err := idUser.DecodeString(flagVarListContainerOwner) - commonCmd.ExitOnErr(cmd, "invalid user ID: %w", err) - } - - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - var prm internalclient.ListContainersPrm - prm.SetClient(cli) - prm.OwnerID = idUser - prmGet := internalclient.GetContainerPrm{ - Client: cli, - } - var containerIDs []cid.ID - - err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool { - printContainer(cmd, prmGet, id) - return false - }) - if err == nil { - return - } - - if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented { - res, err := internalclient.ListContainers(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - containerIDs = res.SortedIDList() - } else { - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - } - - for _, cnrID := range containerIDs { - printContainer(cmd, prmGet, cnrID) - } - }, -} - -func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) { - if flagVarListName == "" && !flagVarListPrintAttr { - cmd.Println(id.String()) - return - } - - prmGet.ClientParams.ContainerID = &id - res, err := internalclient.GetContainer(cmd.Context(), prmGet) - if err != nil { - cmd.Printf(" failed to read attributes: %v\n", err) - return - } - - cnr := res.Container() - if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { - return - } - cmd.Println(id.String()) - - if flagVarListPrintAttr { - for key, val := range cnr.Attributes() { - cmd.Printf(" %s: %s\n", key, val) - } - } -} - -func initContainerListContainersCmd() { - commonflags.Init(listContainersCmd) - - flags := listContainersCmd.Flags() - - flags.StringVar(&flagVarListName, flagListName, "", - "List only containers with the specified name attribute", - ) - flags.StringVar(&flagVarListContainerOwner, flagListContainerOwner, "", - "Owner of containers (omit to use owner from private key)", - ) - flags.BoolVar(&flagVarListPrintAttr, flagListPrintAttr, false, - "Request and print attributes of each container", - ) - flags.Lookup(commonflags.GenerateKey).Usage = generateKeyContainerUsage -} diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go deleted file mode 100644 index d5850359d..000000000 --- a/cmd/frostfs-cli/modules/container/list_objects.go +++ /dev/null @@ -1,88 +0,0 @@ -package container - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -// flags of list-object command. -const ( - flagListObjectPrintAttr = "with-attr" -) - -// flag vars of list-objects command. -var ( - flagVarListObjectsPrintAttr bool -) - -var listContainerObjectsCmd = &cobra.Command{ - Use: "list-objects", - Short: "List existing objects in container", - Long: `List existing objects in container`, - Run: func(cmd *cobra.Command, _ []string) { - id := parseContainerID(cmd) - - filters := new(objectSDK.SearchFilters) - filters.AddRootFilter() // search only user created objects - - cli := internalclient.GetSDKClientByFlag(cmd, key.GetOrGenerate(cmd), commonflags.RPC) - - var prmSearch internalclient.SearchObjectsPrm - var prmHead internalclient.HeadObjectPrm - - prmSearch.SetClient(cli) - - if flagVarListObjectsPrintAttr { - prmHead.SetClient(cli) - objectCli.Prepare(cmd, &prmSearch, &prmHead) - } else { - objectCli.Prepare(cmd, &prmSearch) - } - - prmSearch.SetContainerID(id) - prmSearch.SetFilters(*filters) - - res, err := internalclient.SearchObjects(cmd.Context(), prmSearch) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - objectIDs := res.IDList() - - for i := range objectIDs { - cmd.Println(objectIDs[i].String()) - - if flagVarListObjectsPrintAttr { - var addr oid.Address - addr.SetContainer(id) - addr.SetObject(objectIDs[i]) - prmHead.SetAddress(addr) - - resHead, err := internalclient.HeadObject(cmd.Context(), prmHead) - if err == nil { - for _, attr := range resHead.Header().UserAttributes() { - cmd.Printf(" %s: %s\n", attr.Key(), attr.Value()) - } - } else { - cmd.Printf(" failed to read attributes: %v\n", err) - } - } - } - }, -} - -func initContainerListObjectsCmd() { - commonflags.Init(listContainerObjectsCmd) - objectCli.InitBearer(listContainerObjectsCmd) - - flags := listContainerObjectsCmd.Flags() - - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.BoolVar(&flagVarListObjectsPrintAttr, flagListObjectPrintAttr, false, - "Request and print user attributes of each object", - ) -} diff --git a/cmd/frostfs-cli/modules/container/nodes.go b/cmd/frostfs-cli/modules/container/nodes.go deleted file mode 100644 index 1ae8ab604..000000000 --- a/cmd/frostfs-cli/modules/container/nodes.go +++ /dev/null @@ -1,72 +0,0 @@ -package container - -import ( - "crypto/sha256" - "errors" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/spf13/cobra" -) - -var short bool - -var containerNodesCmd = &cobra.Command{ - Use: "nodes", - Short: "Show nodes for container", - Long: "Show nodes taking part in a container at the current epoch.", - Run: func(cmd *cobra.Command, _ []string) { - cnr, pkey := getContainer(cmd) - - if pkey == nil { - pkey = key.GetOrGenerate(cmd) - } - - cli := internalclient.GetSDKClientByFlag(cmd, pkey, commonflags.RPC) - - var prm internalclient.NetMapSnapshotPrm - prm.SetClient(cli) - - resmap, err := internalclient.NetMapSnapshot(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot", err) - - var id 
cid.ID - containerAPI.CalculateID(&id, cnr) - binCnr := make([]byte, sha256.Size) - id.Encode(binCnr) - - policy := cnr.PlacementPolicy() - - var cnrNodes [][]netmap.NodeInfo - cnrNodes, err = resmap.NetMap().ContainerNodes(policy, binCnr) - commonCmd.ExitOnErr(cmd, "could not build container nodes for given container: %w", err) - - for i := range cnrNodes { - if repNum := policy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 { - cmd.Printf("Descriptor #%d, REP %d:\n", i+1, repNum) - } else if ecParts := policy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 { - cmd.Printf("Descriptor #%d, EC %d.%d:\n", i+1, policy.ReplicaDescriptor(i).GetECDataCount(), - policy.ReplicaDescriptor(i).GetECParityCount()) - } else { - commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set")) - } - for j := range cnrNodes[i] { - commonCmd.PrettyPrintNodeInfo(cmd, cnrNodes[i][j], j, "\t", short) - } - } - }, -} - -func initContainerNodesCmd() { - commonflags.Init(containerNodesCmd) - - flags := containerNodesCmd.Flags() - flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.StringVar(&containerPathFrom, fromFlag, "", fromFlagUsage) - flags.BoolVar(&short, "short", false, "Shortens output of node info") -} diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go deleted file mode 100644 index cf4862b4a..000000000 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ /dev/null @@ -1,372 +0,0 @@ -package container - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "maps" - "os" - "slices" - "strings" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/chzyer/readline" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo - console *readline.Instance -} - -func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { - return &policyPlaygroundREPL{ - cmd: cmd, - nodes: map[string]netmap.NodeInfo{}, - } -} - -func (repl *policyPlaygroundREPL) handleLs(args []string) error { - if len(args) > 0 { - return fmt.Errorf("too many arguments for command 'ls': got %d, want 0", len(args)) - } - i := 1 - for id, node := range repl.nodes { - var attrs []string - for k, v := range node.Attributes() { - attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - } - fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) - i++ - } - return nil -} - -func (repl *policyPlaygroundREPL) handleAdd(args []string) error { - if len(args) == 0 { - return fmt.Errorf("too few arguments for command 'add': got %d, want >0", len(args)) - } - id := args[0] - key, err := hex.DecodeString(id) - if err != nil { - return fmt.Errorf("node id must be a hex string: got %q: %v", id, err) - } - node := repl.nodes[id] - node.SetPublicKey(key) - for _, attr := range args[1:] { - kv := strings.Split(attr, ":") - if len(kv) != 2 { - return fmt.Errorf("node attributes must be in the format 'KEY:VALUE': got %q", attr) - } - node.SetAttribute(kv[0], kv[1]) - } - repl.nodes[id] = node - return nil -} - -func (repl 
*policyPlaygroundREPL) handleLoad(args []string) error { - if len(args) != 1 { - return fmt.Errorf("invalid number of arguments for command 'load': got %d, want 1", len(args)) - } - - jsonNetmap := map[string]map[string]string{} - - b, err := os.ReadFile(args[0]) - if err != nil { - return fmt.Errorf("reading netmap file %q: %v", args[0], err) - } - - if err := json.Unmarshal(b, &jsonNetmap); err != nil { - return fmt.Errorf("decoding json netmap: %v", err) - } - - repl.nodes = make(map[string]netmap.NodeInfo) - for id, attrs := range jsonNetmap { - key, err := hex.DecodeString(id) - if err != nil { - return fmt.Errorf("node id must be a hex string: got %q: %v", id, err) - } - - node := repl.nodes[id] - node.SetPublicKey(key) - for k, v := range attrs { - node.SetAttribute(k, v) - } - repl.nodes[id] = node - } - - return nil -} - -func (repl *policyPlaygroundREPL) handleRemove(args []string) error { - if len(args) == 0 { - return fmt.Errorf("too few arguments for command 'remove': got %d, want >0", len(args)) - } - id := args[0] - if _, exists := repl.nodes[id]; exists { - delete(repl.nodes, id) - return nil - } - return fmt.Errorf("node not found: id=%q", id) -} - -func (repl *policyPlaygroundREPL) handleEval(args []string) error { - policyStr := strings.TrimSpace(strings.Join(args, " ")) - var nodes [][]netmap.NodeInfo - nm := repl.netMap() - - if strings.HasPrefix(policyStr, "CBF") || strings.HasPrefix(policyStr, "SELECT") || strings.HasPrefix(policyStr, "FILTER") { - // Assume that the input is a partial SELECT-FILTER expression. - // Full inline policies always start with UNIQUE or REP keywords, - // or different prefixes when it's the case of an external file. - sfExpr, err := netmap.DecodeSelectFilterString(policyStr) - if err != nil { - return fmt.Errorf("parsing select-filter expression: %v", err) - } - nodes, err = nm.SelectFilterNodes(sfExpr) - if err != nil { - return fmt.Errorf("building select-filter nodes: %v", err) - } - } else { - // Assume that the input is a full policy or input file otherwise.
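 - // For example (illustrative inputs): "SELECT 2 FROM *" is treated as a - // select-filter expression by the branch above, while "REP 2" or a path - // to a policy file is handled by parseContainerPolicy below.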
- placementPolicy, err := parseContainerPolicy(repl.cmd, policyStr) - if err != nil { - return fmt.Errorf("parsing placement policy: %v", err) - } - nodes, err = nm.ContainerNodes(*placementPolicy, nil) - if err != nil { - return fmt.Errorf("building container nodes: %v", err) - } - } - for i, ns := range nodes { - var ids []string - for _, node := range ns { - ids = append(ids, hex.EncodeToString(node.PublicKey())) - } - fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) - } - - return nil -} - -func (repl *policyPlaygroundREPL) handleHelp(args []string) error { - if len(args) != 0 { - if _, ok := commands[args[0]]; !ok { - return fmt.Errorf("unknown command: %q", args[0]) - } - fmt.Fprintln(repl.console, commands[args[0]].usage) - return nil - } - - commandList := slices.Collect(maps.Keys(commands)) - slices.Sort(commandList) - for _, command := range commandList { - fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].description) - } - return nil -} - -func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { - var nm netmap.NetMap - var nodes []netmap.NodeInfo - for _, node := range repl.nodes { - nodes = append(nodes, node) - } - nm.SetNodes(nodes) - return nm -} - -type commandDescription struct { - description string - usage string -} - -var commands = map[string]commandDescription{ - "list": { - description: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap
Example of usage: - list - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "ls": { - description: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap
Example of usage: - ls - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "add": { - description: "Add a new node: add <id> [attr:value ...]", - usage: `Add a new node
Example of usage: - add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, - }, - - "load": { - description: "Load netmap from file: load <path>", - usage: `Load netmap from file
Example of usage: - load "netmap.json" -File format (netmap.json): -{ - "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { - "continent": "Europe", - "country": "Poland" - }, - "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { - "continent": "Antarctica", - "country": "Heard Island" - } -}`, - }, - - "remove": { - description: "Remove a node: remove <id>", - usage: `Remove a node
Example of usage: - remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "rm": { - description: "Remove a node: rm <id>", - usage: `Remove a node
Example of usage: - rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "eval": { - description: "Evaluate a policy: eval <policy>", - usage: `Evaluate a policy
Example of usage: - eval REP 2`, - }, - - "help": { - description: "Show available commands", - }, -} - -func (repl *policyPlaygroundREPL) handleCommand(args []string) error { - if len(args) == 0 { - return nil - } - - switch args[0] { - case "list", "ls": - return repl.handleLs(args[1:]) - case "add": - return repl.handleAdd(args[1:]) - case "load": - return
repl.handleLoad(args[1:]) - case "remove", "rm": - return repl.handleRemove(args[1:]) - case "eval": - return repl.handleEval(args[1:]) - case "help": - return repl.handleHelp(args[1:]) - } - return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0]) -} - -func (repl *policyPlaygroundREPL) run() error { - if len(viper.GetString(commonflags.RPC)) > 0 { - key := key.GetOrGenerate(repl.cmd) - cli := internalclient.GetSDKClientByFlag(repl.cmd, key, commonflags.RPC) - - var prm internalclient.NetMapSnapshotPrm - prm.SetClient(cli) - - resp, err := internalclient.NetMapSnapshot(repl.cmd.Context(), prm) - commonCmd.ExitOnErr(repl.cmd, "unable to get netmap snapshot to populate initial netmap: %w", err) - - for _, node := range resp.NetMap().Nodes() { - id := hex.EncodeToString(node.PublicKey()) - repl.nodes[id] = node - } - } - - if len(viper.GetString(netmapConfigPath)) > 0 { - err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) - commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) - } - - var cfgCompleter []readline.PrefixCompleterInterface - var helpSubItems []readline.PrefixCompleterInterface - - for name := range commands { - if name != "help" { - cfgCompleter = append(cfgCompleter, readline.PcItem(name)) - helpSubItems = append(helpSubItems, readline.PcItem(name)) - } - } - - cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) - completer := readline.NewPrefixCompleter(cfgCompleter...) - rl, err := readline.NewEx(&readline.Config{ - Prompt: "> ", - InterruptPrompt: "^C", - AutoComplete: completer, - }) - if err != nil { - return fmt.Errorf("error initializing readline: %w", err) - } - repl.console = rl - defer rl.Close() - - var exit bool - for { - line, err := rl.Readline() - if err != nil { - if errors.Is(err, readline.ErrInterrupt) { - if exit { - return nil - } - exit = true - continue - } - return fmt.Errorf("reading line: %w", err) - } - exit = false - - if err := repl.handleCommand(strings.Fields(line)); err != nil { - fmt.Fprintf(repl.console, "error: %v\n", err) - } - } -} - -var policyPlaygroundCmd = &cobra.Command{ - Use: "policy-playground", - Short: "A REPL for testing placement policies", - Long: `A REPL for testing placement policies. -If a wallet and endpoint are provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`, - Run: func(cmd *cobra.Command, _ []string) { - repl := newPolicyPlaygroundREPL(cmd) - commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) - }, -} - -const ( - netmapConfigPath = "netmap-config" - netmapConfigUsage = "Path to the netmap configuration file" -) - -func initContainerPolicyPlaygroundCmd() { - commonflags.Init(policyPlaygroundCmd) - policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) - - _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) -} diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go deleted file mode 100644 index 2da21e767..000000000 --- a/cmd/frostfs-cli/modules/container/root.go +++ /dev/null @@ -1,55 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" -) - -// Cmd represents the container command.
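-// It only wires the container sub-commands together and binds their common -// flags; the command logic lives in the children registered in init() below.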
-var Cmd = &cobra.Command{ - Use: "container", - Short: "Operations with containers", - Long: "Operations with containers", - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - // bind exactly this command's flags to - // viper before execution - commonflags.Bind(cmd) - commonflags.BindAPI(cmd) - }, -} - -func init() { - containerChildCommand := []*cobra.Command{ - listContainersCmd, - createContainerCmd, - deleteContainerCmd, - listContainerObjectsCmd, - getContainerInfoCmd, - containerNodesCmd, - policyPlaygroundCmd, - } - - Cmd.AddCommand(containerChildCommand...) - - initContainerListContainersCmd() - initContainerCreateCmd() - initContainerDeleteCmd() - initContainerListObjectsCmd() - initContainerInfoCmd() - initContainerNodesCmd() - initContainerPolicyPlaygroundCmd() - - for _, containerCommand := range containerChildCommand { - commonflags.InitAPI(containerCommand) - } - - for _, el := range []struct { - cmd *cobra.Command - verb string - }{ - {createContainerCmd, "PUT"}, - {deleteContainerCmd, "DELETE"}, - } { - commonflags.InitSession(el.cmd, "container "+el.verb) - } -} diff --git a/cmd/frostfs-cli/modules/container/util.go b/cmd/frostfs-cli/modules/container/util.go deleted file mode 100644 index 4cb268ec5..000000000 --- a/cmd/frostfs-cli/modules/container/util.go +++ /dev/null @@ -1,57 +0,0 @@ -package container - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "github.com/spf13/cobra" -) - -const ( - attributeDelimiter = "=" - - awaitTimeout = 120 // in seconds -) - -var ( - errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain") - errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain") -) - -func parseContainerID(cmd *cobra.Command) cid.ID { - if containerID == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("container ID is not set")) - } - - var id cid.ID - err := id.DecodeString(containerID) - commonCmd.ExitOnErr(cmd, "can't decode container ID value: %w", err) - return id -} - -// decodes session.Container from the file at the path provided in the -// commonflags.SessionToken flag. Returns nil if the path is not specified.
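-// The session file may contain the token in either binary or JSON encoding; -// both forms are handled by common.ReadBinaryOrJSON.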
-func getSession(cmd *cobra.Command) *session.Container { - common.PrintVerbose(cmd, "Reading container session...") - - path, _ := cmd.Flags().GetString(commonflags.SessionToken) - if path == "" { - common.PrintVerbose(cmd, "Session not provided.") - return nil - } - - common.PrintVerbose(cmd, "Reading container session from the file [%s]...", path) - - var res session.Container - - err := common.ReadBinaryOrJSON(cmd, &res, path) - commonCmd.ExitOnErr(cmd, "read container session: %v", err) - - common.PrintVerbose(cmd, "Session successfully read.") - - return &res -} diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go deleted file mode 100644 index 42f229ad9..000000000 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ /dev/null @@ -1,68 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var addRuleCmd = &cobra.Command{ - Use: "add-rule", - Short: "Add local override", - Long: "Add local APE rule to a node with the following format:\n<status>[:action_detail] <operation> [<condition> ...] <resource>", - Example: `control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --rule "allow Object.Get *" ---rule "deny Object.Get EbxzAdz5LB4uqxuz6crWKAumBNtZyK2rKsqQP7TdZvwr/*" ---rule "deny:QuotaLimitReached Object.Put ResourceCondition:Department=HR *" - -control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --path some_chain.json -`, - Run: addRule, -} - -func addRule(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - target := parseTarget(cmd) - - parsed := apeCmd.ParseChain(cmd) - - req := &control.AddChainLocalOverrideRequest{ - Body: &control.AddChainLocalOverrideRequest_Body{ - Target: target, - Chain: parsed.Bytes(), - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.AddChainLocalOverrideResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.AddChainLocalOverride(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - cmd.Println("\nRule has been added.") -} - -func initControlAddRuleCmd() { - initControlFlags(addRuleCmd) - - ff := addRuleCmd.Flags() - ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") - ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") - ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) - ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) - _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) - ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") - - addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) -} diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go deleted file mode 100644 index 025a6e561..000000000 --- a/cmd/frostfs-cli/modules/control/detach_shards.go +++ /dev/null @@ -1,49 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var shardsDetachCmd = &cobra.Command{ - Use: "detach", - Short: "Detach and close the shards", - Long: "Detach and close the shards", - Run: shardsDetach, -} - -func shardsDetach(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.DetachShardsRequest{ - Body: &control.DetachShardsRequest_Body{ - Shard_ID: getShardIDListFromIDFlag(cmd, false), - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.DetachShardsResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.DetachShards(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard mode update request successfully sent.") -} - -func initControlShardsDetachCmd() { - initControlFlags(shardsDetachCmd) - - flags := shardsDetachCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") -} diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go deleted file mode 100644 index 632cdd6a7..000000000 --- a/cmd/frostfs-cli/modules/control/doctor.go +++ /dev/null @@ -1,53 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -const ( - concurrencyFlag = "concurrency" - removeDuplicatesFlag = "remove-duplicates" -) - -var doctorCmd = &cobra.Command{ - Use: "doctor", - Short: "Restructure node's storage", - Long: "Restructure node's storage", - Run: doctor, -} - -func doctor(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.DoctorRequest{Body: new(control.DoctorRequest_Body)} - req.Body.Concurrency, _ = cmd.Flags().GetUint32(concurrencyFlag) - req.Body.RemoveDuplicates, _ = cmd.Flags().GetBool(removeDuplicatesFlag) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.DoctorResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.Doctor(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Operation has finished.") -} - -func initControlDoctorCmd() { - initControlFlags(doctorCmd) - - ff := doctorCmd.Flags() - ff.Uint32(concurrencyFlag, 0, "Number of parallel threads to use") - ff.Bool(removeDuplicatesFlag, false, "Remove duplicate objects") -} diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go b/cmd/frostfs-cli/modules/control/drop_objects.go deleted file mode 100644 index dcc1c1229..000000000 --- a/cmd/frostfs-cli/modules/control/drop_objects.go +++ /dev/null @@ -1,59 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - 
-const dropObjectsFlag = "objects" - -var dropObjectsCmd = &cobra.Command{ - Use: "drop-objects", - Short: "Drop objects from the node's local storage", - Long: "Drop objects from the node's local storage", - Run: func(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - dropObjectsList, _ := cmd.Flags().GetStringSlice(dropObjectsFlag) - binAddrList := make([][]byte, len(dropObjectsList)) - - for i := range dropObjectsList { - binAddrList[i] = []byte(dropObjectsList[i]) - } - - body := new(control.DropObjectsRequest_Body) - body.SetAddressList(binAddrList) - - req := new(control.DropObjectsRequest) - req.SetBody(body) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.DropObjectsResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.DropObjects(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Objects were successfully marked to be removed.") - }, -} - -func initControlDropObjectsCmd() { - initControlFlags(dropObjectsCmd) - - flags := dropObjectsCmd.Flags() - flags.StringSliceP(dropObjectsFlag, "o", nil, - "List of object addresses to be removed in string format") - - _ = dropObjectsCmd.MarkFlagRequired(dropObjectsFlag) -} diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go deleted file mode 100644 index b8d7eb046..000000000 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ /dev/null @@ -1,390 +0,0 @@ -package control - -import ( - "crypto/ecdsa" - "fmt" - "strings" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "github.com/spf13/cobra" -) - -const ( - awaitFlag = "await" - noProgressFlag = "no-progress" - scopeFlag = "scope" - repOneOnlyFlag = "rep-one-only" - ignoreErrorsFlag = "no-errors" - - containerWorkerCountFlag = "container-worker-count" - objectWorkerCountFlag = "object-worker-count" - - scopeAll = "all" - scopeObjects = "objects" - scopeTrees = "trees" -) - -var evacuationShardCmd = &cobra.Command{ - Use: "evacuation", - Short: "Object evacuation from a shard", - Long: "Object evacuation from a shard to other shards", -} - -var startEvacuationShardCmd = &cobra.Command{ - Use: "start", - Short: "Start evacuating objects from a shard", - Long: "Start evacuating objects from a shard to other shards", - Run: startEvacuateShard, -} - -var getEvacuationShardStatusCmd = &cobra.Command{ - Use: "status", - Short: "Get the status of object evacuation from a shard", - Long: "Get the status of object evacuation from a shard to other shards", - Run: getEvacuateShardStatus, -} - -var stopEvacuationShardCmd = &cobra.Command{ - Use: "stop", - Short: "Stop the running evacuation process", - Long: "Stop the running evacuation process from a shard to other shards", - Run: stopEvacuateShardStatus, -} - -var resetEvacuationStatusShardCmd = &cobra.Command{ - Use: "reset", - Short: "Reset the object evacuation status of a shard", - Long: "Reset the status of object evacuation from a shard to other shards", - Run: resetEvacuateShardStatus, -} - -func startEvacuateShard(cmd *cobra.Command, _ []string) { - pk :=
key.Get(cmd) - - ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) - containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag) - objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag) - repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag) - - req := &control.StartShardEvacuationRequest{ - Body: &control.StartShardEvacuationRequest_Body{ - Shard_ID: getShardIDList(cmd), - IgnoreErrors: ignoreErrors, - Scope: getEvacuationScope(cmd), - ContainerWorkerCount: containerWorkerCount, - ObjectWorkerCount: objectWorkerCount, - RepOneOnly: repOneOnly, - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.StartShardEvacuationResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.StartShardEvacuation(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "Start shard evacuation failed, rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard evacuation has been successfully started.") - - if awaitCompletion, _ := cmd.Flags().GetBool(awaitFlag); awaitCompletion { - noProgress, _ := cmd.Flags().GetBool(noProgressFlag) - waitEvacuateCompletion(cmd, pk, cli, !noProgress, true) - } -} - -func getEvacuationScope(cmd *cobra.Command) uint32 { - rawScope, err := cmd.Flags().GetString(scopeFlag) - commonCmd.ExitOnErr(cmd, "Invalid scope value: %w", err) - switch rawScope { - case scopeAll: - return uint32(control.StartShardEvacuationRequest_Body_OBJECTS) | uint32(control.StartShardEvacuationRequest_Body_TREES) - case scopeObjects: - return uint32(control.StartShardEvacuationRequest_Body_OBJECTS) - case scopeTrees: - return uint32(control.StartShardEvacuationRequest_Body_TREES) - default: - commonCmd.ExitOnErr(cmd, "Invalid scope value: %w", fmt.Errorf("unknown scope %s", rawScope)) - } - return uint32(control.StartShardEvacuationRequest_Body_NONE) -} - -func getEvacuateShardStatus(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - req := &control.GetShardEvacuationStatusRequest{ - Body: &control.GetShardEvacuationStatusRequest_Body{}, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.GetShardEvacuationStatusResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.GetShardEvacuationStatus(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "Get shard evacuation status failed, rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - printStatus(cmd, resp) -} - -func stopEvacuateShardStatus(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - req := &control.StopShardEvacuationRequest{ - Body: &control.StopShardEvacuationRequest_Body{}, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.StopShardEvacuationResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.StopShardEvacuation(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "Stop shard evacuation failed, rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - waitEvacuateCompletion(cmd, pk, cli, false, false) - - cmd.Println("Evacuation stopped.") -} - -func resetEvacuateShardStatus(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - req := &control.ResetShardEvacuationStatusRequest{ - Body: &control.ResetShardEvacuationStatusRequest_Body{}, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp
*control.ResetShardEvacuationStatusResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.ResetShardEvacuationStatus(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "Reset shard evacuation status failed, rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard evacuation status has been reset.") -} - -func waitEvacuateCompletion(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *clientSDK.Client, printProgress, printCompleted bool) { - const statusPollingInterval = 1 * time.Second - const reportIntervalSeconds = 5 - var resp *control.GetShardEvacuationStatusResponse - reportResponse := new(atomic.Pointer[control.GetShardEvacuationStatusResponse]) - pollingCompleted := make(chan struct{}) - progressReportCompleted := make(chan struct{}) - - go func() { - defer close(progressReportCompleted) - if !printProgress { - return - } - cmd.Printf("Progress will be reported every %d seconds.\n", reportIntervalSeconds) - for { - select { - case <-pollingCompleted: - return - case <-time.After(reportIntervalSeconds * time.Second): - r := reportResponse.Load() - if r == nil || r.GetBody().GetStatus() == control.GetShardEvacuationStatusResponse_Body_COMPLETED { - continue - } - printStatus(cmd, r) - } - } - }() - - for { - req := &control.GetShardEvacuationStatusRequest{ - Body: &control.GetShardEvacuationStatusRequest_Body{}, - } - signRequest(cmd, pk, req) - - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.GetShardEvacuationStatus(client, req) - return err - }) - - reportResponse.Store(resp) - - if err != nil { - commonCmd.ExitOnErr(cmd, "Failed to get evacuation status, rpc error: %w", err) - return - } - if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING { - break - } - - time.Sleep(statusPollingInterval) - } - close(pollingCompleted) - <-progressReportCompleted - if printCompleted { - printCompletedStatusMessage(cmd, resp) - } -} - -func printCompletedStatusMessage(cmd *cobra.Command, resp *control.GetShardEvacuationStatusResponse) { - cmd.Println("Shard evacuation has been completed.") - sb := &strings.Builder{} - appendShardIDs(sb, resp) - appendCounts(sb, resp) - appendError(sb, resp) - appendStartedAt(sb, resp) - appendDuration(sb, resp) - cmd.Println(sb.String()) -} - -func printStatus(cmd *cobra.Command, resp *control.GetShardEvacuationStatusResponse) { - if resp.GetBody().GetStatus() == control.GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED { - cmd.Println("There is no running or completed evacuation.") - return - } - sb := &strings.Builder{} - appendShardIDs(sb, resp) - appendStatus(sb, resp) - appendCounts(sb, resp) - appendError(sb, resp) - appendStartedAt(sb, resp) - appendDuration(sb, resp) - appendEstimation(sb, resp) - cmd.Println(sb.String()) -} - -func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING || - resp.GetBody().GetDuration() == nil || - (resp.GetBody().GetTotalObjects() == 0 && resp.GetBody().GetTotalTrees() == 0) || - (resp.GetBody().GetEvacuatedObjects()+resp.GetBody().GetFailedObjects()+resp.GetBody().GetSkippedObjects() == 0 && - resp.GetBody().GetEvacuatedTrees()+resp.GetBody().GetFailedTrees() == 0) { - return - } - - durationSeconds := float64(resp.GetBody().GetDuration().GetSeconds()) - evacuated :=
-func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING || - resp.GetBody().GetDuration() == nil || - (resp.GetBody().GetTotalObjects() == 0 && resp.GetBody().GetTotalTrees() == 0) || - (resp.GetBody().GetEvacuatedObjects()+resp.GetBody().GetFailedObjects()+resp.GetBody().GetSkippedObjects() == 0 && - resp.GetBody().GetEvacuatedTrees()+resp.GetBody().GetFailedTrees() == 0) { - return - } - - durationSeconds := float64(resp.GetBody().GetDuration().GetSeconds()) - evacuated := float64(resp.GetBody().GetEvacuatedObjects() + resp.GetBody().GetFailedObjects() + resp.GetBody().GetSkippedObjects() + - resp.GetBody().GetEvacuatedTrees() + resp.GetBody().GetFailedTrees()) - avgObjEvacuationTimeSeconds := durationSeconds / evacuated - objectsLeft := float64(resp.GetBody().GetTotalObjects()+resp.GetBody().GetTotalTrees()) - evacuated - leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft - leftMinutes := int(leftSeconds / 60) - - fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) -} - -func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - if resp.GetBody().GetDuration() != nil { - duration := time.Second * time.Duration(resp.GetBody().GetDuration().GetSeconds()) - hour := int(duration.Seconds() / 3600) - minute := int(duration.Seconds()/60) % 60 - second := int(duration.Seconds()) % 60 - fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) - } -} - -func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - if resp.GetBody().GetStartedAt() != nil { - startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) - } -} - -func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - if len(resp.GetBody().GetErrorMessage()) > 0 { - fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) - } -} - -func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - var status string - switch resp.GetBody().GetStatus() { - case control.GetShardEvacuationStatusResponse_Body_COMPLETED: - status = "completed" - case control.GetShardEvacuationStatusResponse_Body_RUNNING: - status = "running" - default: - status = "undefined" - } - fmt.Fprintf(sb, " Status: %s.", status) -} - -func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - sb.WriteString("Shard IDs: ") - for idx, shardID := range resp.GetBody().GetShard_ID() { - shardIDStr := shard.NewIDFromBytes(shardID).String() - if idx > 0 { - sb.WriteString(", ") - } - sb.WriteString(shardIDStr) - if idx == len(resp.GetBody().GetShard_ID())-1 { - sb.WriteString(".") - } - } -} - -func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", - resp.GetBody().GetEvacuatedObjects(), - resp.GetBody().GetTotalObjects(), - resp.GetBody().GetFailedObjects(), - resp.GetBody().GetSkippedObjects(), - resp.GetBody().GetEvacuatedTrees(), - resp.GetBody().GetTotalTrees(), - resp.GetBody().GetFailedTrees()) -} - -func initControlEvacuationShardCmd() { - evacuationShardCmd.AddCommand(startEvacuationShardCmd) - evacuationShardCmd.AddCommand(getEvacuationShardStatusCmd) - evacuationShardCmd.AddCommand(stopEvacuationShardCmd) - evacuationShardCmd.AddCommand(resetEvacuationStatusShardCmd) - - initControlStartEvacuationShardCmd() - initControlFlags(getEvacuationShardStatusCmd) - initControlFlags(stopEvacuationShardCmd) - initControlFlags(resetEvacuationStatusShardCmd) -} - -func initControlStartEvacuationShardCmd() { - initControlFlags(startEvacuationShardCmd) - - flags := startEvacuationShardCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - flags.Bool(shardAllFlag, false, "Process all shards") - flags.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
 - flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll)) - flags.Bool(awaitFlag, false, "Block execution until evacuation is completed") - flags.Bool(noProgressFlag, false, fmt.Sprintf("Do not print progress if %s provided", awaitFlag)) - flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers") - flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers") - flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'") - - startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go deleted file mode 100644 index 280aacfad..000000000 --- a/cmd/frostfs-cli/modules/control/flush_cache.go +++ /dev/null @@ -1,56 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -const sealFlag = "seal" - -var flushCacheCmd = &cobra.Command{ - Use: "flush-cache", - Short: "Flush objects from the write-cache to the main storage", - Long: "Flush objects from the write-cache to the main storage", - Run: flushCache, - Deprecated: "Flushing objects from writecache to the main storage is performed by writecache automatically. To flush and seal writecache use `frostfs-cli control shards writecache seal`.", -} - -func flushCache(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - seal, _ := cmd.Flags().GetBool(sealFlag) - req := &control.FlushCacheRequest{Body: &control.FlushCacheRequest_Body{ - Seal: seal, - }} - req.Body.Shard_ID = getShardIDList(cmd) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.FlushCacheResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.FlushCache(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Write-cache has been flushed.") -} - -func initControlFlushCacheCmd() { - initControlFlags(flushCacheCmd) - - ff := flushCacheCmd.Flags() - ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - ff.Bool(shardAllFlag, false, "Process all shards") - ff.Bool(sealFlag, false, "Writecache will be left in read-only mode after the flush is completed") - - flushCacheCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go deleted file mode 100644 index 4da903a9a..000000000 --- a/cmd/frostfs-cli/modules/control/get_rule.go +++ /dev/null @@ -1,71 +0,0 @@ -package control - -import ( - "encoding/hex" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/spf13/cobra" -) - -var getRuleCmd = &cobra.Command{ - Use: "get-rule", - Short: "Get local override", - Long: "Get local APE override of the node", - Run: getRule, -} - -func getRule(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - target := parseTarget(cmd) - - chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) - - if hexEncoded { - chainIDBytes, err := hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - chainID = string(chainIDBytes) - } - - req := &control.GetChainLocalOverrideRequest{ - Body: &control.GetChainLocalOverrideRequest_Body{ - Target: target, - ChainId: []byte(chainID), - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.GetChainLocalOverrideResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.GetChainLocalOverride(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - var chain apechain.Chain - commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain())) - apecmd.PrintHumanReadableAPEChain(cmd, &chain) -} - -func initControlGetRuleCmd() { - initControlFlags(getRuleCmd) - - ff := getRuleCmd.Flags() - ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) - ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) - _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) - ff.String(apecmd.ChainIDFlag, "", "Chain ID") - ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") -} diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go deleted file mode 100644 index 1d4441f1e..000000000 --- a/cmd/frostfs-cli/modules/control/healthcheck.go +++ /dev/null @@ -1,66 +0,0 @@ -package control - -import ( - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -const ( - healthcheckIRFlag = "ir" -) - -var healthCheckCmd = &cobra.Command{ - Use: "healthcheck", - Short: "Health check for FrostFS storage nodes", - Long: "Health check for FrostFS storage nodes.", - Run: healthCheck, -} - -func initControlHealthCheckCmd() { - initControlFlags(healthCheckCmd) - - flags := healthCheckCmd.Flags() - flags.Bool(healthcheckIRFlag, false, "Communicate with IR node") - flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage) - _ = flags.MarkDeprecated(healthcheckIRFlag, "for health check of inner ring nodes, use the 'control ir healthcheck' command instead.") -} - -func healthCheck(cmd *cobra.Command, args []string) { - if isIR, _ := cmd.Flags().GetBool(healthcheckIRFlag); isIR { - irHealthCheck(cmd, args) - return - } - - pk := key.Get(cmd) - cli := getClient(cmd, pk) - - req := new(control.HealthCheckRequest) - req.SetBody(new(control.HealthCheckRequest_Body)) - - signRequest(cmd, pk, req) - - var resp *control.HealthCheckResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.HealthCheck(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd,
resp.GetSignature(), resp.GetBody()) - if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag { - if resp.GetBody().GetHealthStatus() == control.HealthStatus_READY { - return - } - os.Exit(1) - } - - cmd.Printf("Network status: %s\n", resp.GetBody().GetNetmapStatus()) - cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus()) -} diff --git a/cmd/frostfs-cli/modules/control/ir.go b/cmd/frostfs-cli/modules/control/ir.go deleted file mode 100644 index 2a38f1e97..000000000 --- a/cmd/frostfs-cli/modules/control/ir.go +++ /dev/null @@ -1,34 +0,0 @@ -package control - -import ( - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -var irCmd = &cobra.Command{ - Use: "ir", - Short: "Operations with inner ring nodes", - Long: "Operations with inner ring nodes", -} - -func initControlIRCmd() { - irCmd.AddCommand(tickEpochCmd) - irCmd.AddCommand(removeNodeCmd) - irCmd.AddCommand(irHealthCheckCmd) - irCmd.AddCommand(removeContainerCmd) - - initControlIRTickEpochCmd() - initControlIRRemoveNodeCmd() - initControlIRHealthCheckCmd() - initControlIRRemoveContainerCmd() -} - -func printVUB(cmd *cobra.Command, vub uint32) { - cmd.Printf("Transaction's valid until block is %d\n", vub) -} - -func parseVUB(cmd *cobra.Command) uint32 { - vub, err := cmd.Flags().GetUint32(irFlagNameVUB) - commonCmd.ExitOnErr(cmd, "invalid valid until block value: %w", err) - return vub -} diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go deleted file mode 100644 index 373f21c30..000000000 --- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go +++ /dev/null @@ -1,55 +0,0 @@ -package control - -import ( - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var irHealthCheckCmd = &cobra.Command{ - Use: "healthcheck", - Short: "Health check for FrostFS inner ring nodes", - Long: "Health check for FrostFS inner ring nodes.", - Run: irHealthCheck, -} - -func initControlIRHealthCheckCmd() { - initControlFlags(irHealthCheckCmd) - flags := irHealthCheckCmd.Flags() - flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage) -} - -func irHealthCheck(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - cli := getClient(cmd, pk) - - req := new(ircontrol.HealthCheckRequest) - - req.SetBody(new(ircontrol.HealthCheckRequest_Body)) - - err := ircontrolsrv.SignMessage(pk, req) - commonCmd.ExitOnErr(cmd, "could not sign request: %w", err) - - var resp *ircontrol.HealthCheckResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = ircontrol.HealthCheck(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag { - if resp.GetBody().GetHealthStatus() == ircontrol.HealthStatus_READY { - return - } - os.Exit(1) - } - - cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus()) -} diff --git 
a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go deleted file mode 100644 index 460e299e5..000000000 --- a/cmd/frostfs-cli/modules/control/ir_remove_container.go +++ /dev/null @@ -1,93 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" -) - -const ( - ownerFlag = "owner" -) - -var removeContainerCmd = &cobra.Command{ - Use: "remove-container", - Short: "Schedules a container removal", - Long: `Schedules a container removal via a notary request. -Container data will be deleted asynchronously by policer. -Removal status can be checked with the "frostfs-cli container list" command.`, - Run: removeContainer, -} - -func initControlIRRemoveContainerCmd() { - initControlIRFlags(removeContainerCmd) - - flags := removeContainerCmd.Flags() - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String(ownerFlag, "", "Container owner's wallet address.") - removeContainerCmd.MarkFlagsMutuallyExclusive(commonflags.CIDFlag, ownerFlag) - removeContainerCmd.MarkFlagsOneRequired(commonflags.CIDFlag, ownerFlag) -} - -func removeContainer(cmd *cobra.Command, _ []string) { - req := prepareRemoveContainerRequest(cmd) - - pk := key.Get(cmd) - c := getClient(cmd, pk) - - commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req)) - - var resp *ircontrol.RemoveContainerResponse - err := c.ExecRaw(func(client *rawclient.Client) error { - var err error - resp, err = ircontrol.RemoveContainer(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "failed to execute request: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - if len(req.GetBody().GetContainerId()) > 0 { - cmd.Println("Container scheduled for removal") - } else { - cmd.Println("User containers scheduled for removal") - } - printVUB(cmd, resp.GetBody().GetVub()) -} - -func prepareRemoveContainerRequest(cmd *cobra.Command) *ircontrol.RemoveContainerRequest { - req := &ircontrol.RemoveContainerRequest{ - Body: &ircontrol.RemoveContainerRequest_Body{}, - } - - cidStr, err := cmd.Flags().GetString(commonflags.CIDFlag) - commonCmd.ExitOnErr(cmd, "failed to get cid: %w", err) - - ownerStr, err := cmd.Flags().GetString(ownerFlag) - commonCmd.ExitOnErr(cmd, "failed to get owner: %w", err) - - if len(ownerStr) > 0 { - var owner user.ID - commonCmd.ExitOnErr(cmd, "invalid owner ID: %w", owner.DecodeString(ownerStr)) - var ownerID refs.OwnerID - owner.WriteToV2(&ownerID) - req.Body.Owner = ownerID.StableMarshal(nil) - } - - if len(cidStr) > 0 { - var containerID cid.ID - commonCmd.ExitOnErr(cmd, "invalid container ID: %w", containerID.DecodeString(cidStr)) - req.Body.ContainerId = containerID[:] - } - - req.Body.Vub = parseVUB(cmd) - - return req -} diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go deleted file mode 100644 index 2fe686d63..000000000 --- a/cmd/frostfs-cli/modules/control/ir_remove_node.go +++ /dev/null @@ -1,60 +0,0 @@ -package control - -import ( - "encoding/hex" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var removeNodeCmd = &cobra.Command{ - Use: "remove-node", - Short: "Forces a node removal from netmap", - Long: "Forces a node removal from netmap via a notary request. It should be executed on other IR nodes as well.", - Run: removeNode, -} - -func initControlIRRemoveNodeCmd() { - initControlIRFlags(removeNodeCmd) - - flags := removeNodeCmd.Flags() - flags.String("node", "", "Node public key as a hex string") - _ = removeNodeCmd.MarkFlagRequired("node") -}
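- -// Example invocation (editor's sketch, not from the original file; --vub is -// the notary "valid until block" flag registered by initControlIRFlags, and -// the endpoint/wallet flags come from initControlFlags): -// frostfs-cli control ir remove-node --endpoint <addr> --wallet <path> \ -// --node <hex-encoded-public-key> --vub <block-height>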
-func removeNode(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - c := getClient(cmd, pk) - - nodeKeyStr, _ := cmd.Flags().GetString("node") - if len(nodeKeyStr) == 0 { - commonCmd.ExitOnErr(cmd, "parsing node public key: %w", errors.New("key cannot be empty")) - } - nodeKey, err := hex.DecodeString(nodeKeyStr) - commonCmd.ExitOnErr(cmd, "can't decode node public key: %w", err) - - req := new(ircontrol.RemoveNodeRequest) - req.SetBody(&ircontrol.RemoveNodeRequest_Body{ - Key: nodeKey, - Vub: parseVUB(cmd), - }) - - commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req)) - - var resp *ircontrol.RemoveNodeResponse - err = c.ExecRaw(func(client *rawclient.Client) error { - resp, err = ircontrol.RemoveNode(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Node removed") - printVUB(cmd, resp.GetBody().GetVub()) -} diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go deleted file mode 100644 index 5f09e92c1..000000000 --- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go +++ /dev/null @@ -1,46 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var tickEpochCmd = &cobra.Command{ - Use: "tick-epoch", - Short: "Forces a new epoch", - Long: "Forces a new epoch via a notary request. It should be executed on other IR nodes as well.",
 - Run: tickEpoch, -} - -func initControlIRTickEpochCmd() { - initControlIRFlags(tickEpochCmd) -} - -func tickEpoch(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - c := getClient(cmd, pk) - - req := new(ircontrol.TickEpochRequest) - req.SetBody(&ircontrol.TickEpochRequest_Body{ - Vub: parseVUB(cmd), - }) - - err := ircontrolsrv.SignMessage(pk, req) - commonCmd.ExitOnErr(cmd, "could not sign request: %w", err) - - var resp *ircontrol.TickEpochResponse - err = c.ExecRaw(func(client *rawclient.Client) error { - resp, err = ircontrol.TickEpoch(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Epoch tick requested") - printVUB(cmd, resp.GetBody().GetVub()) -} diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go deleted file mode 100644 index a6c65d083..000000000 --- a/cmd/frostfs-cli/modules/control/list_rules.go +++ /dev/null @@ -1,89 +0,0 @@ -package control - -import ( - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "github.com/spf13/cobra" -) - -var listRulesCmd = &cobra.Command{ - Use: "list-rules", - Short: "List local overrides", - Long: "List local APE overrides of the node", - Run: listRules, -} - -var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{ - policyengine.Namespace: control.ChainTarget_NAMESPACE, - policyengine.Container: control.ChainTarget_CONTAINER, - policyengine.User: control.ChainTarget_USER, - policyengine.Group: control.ChainTarget_GROUP, -} - -func parseTarget(cmd *cobra.Command) *control.ChainTarget { - target := apeCmd.ParseTarget(cmd) - - typ, ok := engineToControlSvcType[target.Type] - if !ok { - commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c'", target.Type)) - } - - return &control.ChainTarget{ - Name: target.Name, - Type: typ, - } -} - -func listRules(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - target := parseTarget(cmd) - req := &control.ListChainLocalOverridesRequest{ - Body: &control.ListChainLocalOverridesRequest_Body{ - Target: target, - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.ListChainLocalOverridesResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.ListChainLocalOverrides(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - chains := resp.GetBody().GetChains() - if len(chains) == 0 { - cmd.Printf("Local overrides are not defined for the %s.\n", strings.ToLower(target.GetType().String())) - return - } - - for _, c := range chains { - var chain apechain.Chain - commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c)) - apeCmd.PrintHumanReadableAPEChain(cmd, &chain) - } -} - -func initControlListRulesCmd() { - initControlFlags(listRulesCmd) - - ff := listRulesCmd.Flags() - ff.String(apeCmd.TargetNameFlag, "",
apeCmd.TargetNameFlagDesc) - ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) - _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) -} diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go deleted file mode 100644 index 3142d02e7..000000000 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ /dev/null @@ -1,78 +0,0 @@ -package control - -import ( - "bytes" - "fmt" - "strconv" - "text/tabwriter" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var listTargetsCmd = &cobra.Command{ - Use: "list-targets", - Short: "List local targets", - Long: "List local APE overrides of the node", - Run: listTargets, -} - -func listTargets(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - chainName := apeCmd.ParseChainName(cmd) - - req := &control.ListTargetsLocalOverridesRequest{ - Body: &control.ListTargetsLocalOverridesRequest_Body{ - ChainName: string(chainName), - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.ListTargetsLocalOverridesResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.ListTargetsLocalOverrides(client, req) - return err - }) - if err != nil && status.Code(err) == codes.NotFound { - cmd.Println("Local overrides are not defined for any target.") - return - } - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - targets := resp.GetBody().GetTargets() - if len(targets) == 0 { - cmd.Println("Local overrides are not defined for any target.") - return - } - - buf := bytes.NewBuffer(nil) - tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write([]byte("#\tName\tType\n")) - for i, t := range targets { - _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) - } - _ = tw.Flush() - cmd.Print(buf.String()) -} - -func initControlListTargetsCmd() { - initControlFlags(listTargetsCmd) - - ff := listTargetsCmd.Flags() - ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc) - - _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag) -} diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go deleted file mode 100644 index 4cb4be539..000000000 --- a/cmd/frostfs-cli/modules/control/locate.go +++ /dev/null @@ -1,117 +0,0 @@ -package control - -import ( - "bytes" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -const ( - FullInfoFlag = "full" - FullInfoFlagUsage = 
"Print full ShardInfo." -) - -var locateObjectCmd = &cobra.Command{ - Use: "locate-object", - Short: "List shards storing the object", - Long: "List shards storing the object", - Run: locateObject, -} - -func initControlLocateObjectCmd() { - initControlFlags(locateObjectCmd) - - flags := locateObjectCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.") - flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) -} - -func locateObject(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - _ = object.ReadObjectAddress(cmd, &cnr, &obj) - - pk := key.Get(cmd) - - body := new(control.ListShardsForObjectRequest_Body) - body.SetContainerId(cnr.EncodeToString()) - body.SetObjectId(obj.EncodeToString()) - req := new(control.ListShardsForObjectRequest) - req.SetBody(body) - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var err error - var resp *control.ListShardsForObjectResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.ListShardsForObject(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - shardIDs := resp.GetBody().GetShard_ID() - - isFull, _ := cmd.Flags().GetBool(FullInfoFlag) - if !isFull { - for _, id := range shardIDs { - cmd.Println(base58.Encode(id)) - } - return - } - - // get full shard info - listShardsReq := new(control.ListShardsRequest) - listShardsReq.SetBody(new(control.ListShardsRequest_Body)) - signRequest(cmd, pk, listShardsReq) - var listShardsResp *control.ListShardsResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - listShardsResp, err = control.ListShards(client, listShardsReq) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) - - shards := listShardsResp.GetBody().GetShards() - sortShardsByID(shards) - shards = filterShards(shards, shardIDs) - - isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - if isJSON { - prettyPrintShardsJSON(cmd, shards) - } else { - prettyPrintShards(cmd, shards) - } -} - -func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { - var res []control.ShardInfo - for _, id := range ids { - for _, inf := range info { - if bytes.Equal(inf.Shard_ID, id) { - res = append(res, inf) - } - } - } - return res -} diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go deleted file mode 100644 index 3df12a15d..000000000 --- a/cmd/frostfs-cli/modules/control/rebuild_shards.go +++ /dev/null @@ -1,88 +0,0 @@ -package control - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -const ( - fillPercentFlag = "fill_percent" -) - -var shardsRebuildCmd = &cobra.Command{ - Use: "rebuild", - Short: "Rebuild shards", - Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage 
structure according to the configuration (for blobovnicza only now)", - Run: shardsRebuild, -} - -func shardsRebuild(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.StartShardRebuildRequest{ - Body: &control.StartShardRebuildRequest_Body{ - Shard_ID: getShardIDList(cmd), - TargetFillPercent: getFillPercentValue(cmd), - ConcurrencyLimit: getConcurrencyValue(cmd), - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.StartShardRebuildResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.StartShardRebuild(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - var success, failed uint - for _, res := range resp.GetBody().GetResults() { - if res.GetSuccess() { - success++ - cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID())) - } else { - failed++ - cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError()) - } - } - cmd.Printf("Total: %d success, %d failed\n", success, failed) -} - -func getFillPercentValue(cmd *cobra.Command) uint32 { - v, _ := cmd.Flags().GetUint32(fillPercentFlag) - if v <= 0 || v > 100 { - commonCmd.ExitOnErr(cmd, "invalid fill_percent value: %w", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v)) - } - return v -} - -func getConcurrencyValue(cmd *cobra.Command) uint32 { - v, _ := cmd.Flags().GetUint32(concurrencyFlag) - if v <= 0 || v > 10000 { - commonCmd.ExitOnErr(cmd, "invalid concurrency value: %w", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v)) - } - return v -}
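- -// Example invocation (editor's sketch, not from the original file; flag -// defaults as registered below): -// frostfs-cli control shards rebuild --endpoint <addr> --wallet <path> \ -// --all --fill_percent 80 --concurrency 20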
-func initControlShardRebuildCmd() { - initControlFlags(shardsRebuildCmd) - - flags := shardsRebuildCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - flags.Bool(shardAllFlag, false, "Process all shards") - flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space") - flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files") - shardsRebuildCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go deleted file mode 100644 index 036317bcb..000000000 --- a/cmd/frostfs-cli/modules/control/remove_rule.go +++ /dev/null @@ -1,97 +0,0 @@ -package control - -import ( - "encoding/hex" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -var ( - errEmptyChainID = errors.New("chain id cannot be empty") - - removeRuleCmd = &cobra.Command{ - Use: "remove-rule", - Short: "Remove local override", - Long: "Remove local APE override of the node", - Run: removeRule, - } -) - -func removeRule(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) - removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag) - if removeAll { - req := &control.RemoveChainLocalOverridesByTargetRequest{ - Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{ - Target: parseTarget(cmd), - }, - } - signRequest(cmd, pk, req) - cli := getClient(cmd, pk) - var resp *control.RemoveChainLocalOverridesByTargetResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.RemoveChainLocalOverridesByTarget(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - cmd.Println("All rules have been removed.") - return - } - - chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID) - } - chainIDRaw := []byte(chainID) - - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - - req := &control.RemoveChainLocalOverrideRequest{ - Body: &control.RemoveChainLocalOverrideRequest_Body{ - Target: parseTarget(cmd), - ChainId: chainIDRaw, - }, - } - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.RemoveChainLocalOverrideResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.RemoveChainLocalOverride(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Rule has been removed.") -} - -func initControlRemoveRuleCmd() { - initControlFlags(removeRuleCmd) - - ff := removeRuleCmd.Flags() - ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) - ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) - _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) - ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc) - ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc) - ff.Bool(apecmd.AllFlag, false, "Remove all chains") - removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag) -} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go deleted file mode 100644 index 3abfe80cb..000000000 --- a/cmd/frostfs-cli/modules/control/root.go +++ /dev/null @@ -1,57 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var Cmd = &cobra.Command{ - Use: "control", - Short: "Operations with storage node", - Long: `Operations with storage node`, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - ff := cmd.Flags() - - _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) - _ = viper.BindPFlag(controlRPC, ff.Lookup(controlRPC)) - _ = viper.BindPFlag(commonflags.Timeout, ff.Lookup(commonflags.Timeout)) - }, -} - -const ( - controlRPC = "endpoint" - controlRPCDefault = "" - controlRPCUsage = "Remote node control address (as 'multiaddr' or '<host>:<port>')" -) - -func init() { - Cmd.AddCommand( - healthCheckCmd, - setNetmapStatusCmd, - dropObjectsCmd, - shardsCmd, - synchronizeTreeCmd, - irCmd, - addRuleCmd, - removeRuleCmd, - listRulesCmd, - getRuleCmd, - listTargetsCmd, - locateObjectCmd, - ) - - initControlHealthCheckCmd() - initControlSetNetmapStatusCmd() - initControlDropObjectsCmd() - initControlShardsCmd() - initControlSynchronizeTreeCmd() - initControlIRCmd() - initControlAddRuleCmd() - initControlRemoveRuleCmd() - initControlListRulesCmd() - initControlGetRuleCmd() - initControlListTargetsCmd() - initControlLocateObjectCmd() -} diff --git 
a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go deleted file mode 100644 index 26a1ba883..000000000 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ /dev/null @@ -1,159 +0,0 @@ -package control - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "github.com/spf13/cobra" -) - -const ( - netmapStatusFlag = "status" - - netmapStatusOnline = "online" - netmapStatusOffline = "offline" - netmapStatusMaintenance = "maintenance" - - maxSetStatusMaxWaitTime = 30 * time.Minute - setStatusWaitTimeout = 30 * time.Second -) - -var errNetmapStatusAwaitFailed = errors.New("netmap status hasn't changed for 30 minutes") - -var setNetmapStatusCmd = &cobra.Command{ - Use: "set-status", - Short: "Set status of the storage node in FrostFS network map", - Long: "Set status of the storage node in FrostFS network map", - Run: setNetmapStatus, -} - -func initControlSetNetmapStatusCmd() { - initControlFlags(setNetmapStatusCmd) - - flags := setNetmapStatusCmd.Flags() - flags.String(netmapStatusFlag, "", - fmt.Sprintf("New netmap status keyword ('%s', '%s', '%s')", - netmapStatusOnline, - netmapStatusOffline, - netmapStatusMaintenance, - ), - ) - - _ = setNetmapStatusCmd.MarkFlagRequired(netmapStatusFlag) - - flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false, - "Force turning to local maintenance") - - flags.Bool(commonflags.AwaitFlag, false, commonflags.AwaitFlagUsage) -} - -func setNetmapStatus(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - body := new(control.SetNetmapStatusRequest_Body) - force, _ := cmd.Flags().GetBool(commonflags.ForceFlag) - - printIgnoreForce := func(st control.NetmapStatus) { - if force { - common.PrintVerbose(cmd, "Ignore --%s flag for %s state.", commonflags.ForceFlag, st) - } - } - - await, _ := cmd.Flags().GetBool(commonflags.AwaitFlag) - var targetStatus control.NetmapStatus - switch st, _ := cmd.Flags().GetString(netmapStatusFlag); st { - default: - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported status %s", st)) - case netmapStatusOnline: - body.SetStatus(control.NetmapStatus_ONLINE) - printIgnoreForce(control.NetmapStatus_ONLINE) - targetStatus = control.NetmapStatus_ONLINE - case netmapStatusOffline: - body.SetStatus(control.NetmapStatus_OFFLINE) - printIgnoreForce(control.NetmapStatus_OFFLINE) - targetStatus = control.NetmapStatus_OFFLINE - case netmapStatusMaintenance: - body.SetStatus(control.NetmapStatus_MAINTENANCE) - - if force { - body.SetForceMaintenance(true) - common.PrintVerbose(cmd, "Local maintenance will be forced.") - } - targetStatus = control.NetmapStatus_MAINTENANCE - } - - req := new(control.SetNetmapStatusRequest) - req.SetBody(body) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.SetNetmapStatusResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.SetNetmapStatus(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - 
verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Network status update request successfully sent.") - - if await { - awaitSetNetmapStatus(cmd, pk, cli, targetStatus) - } -} - -func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.Client, targetStatus control.NetmapStatus) { - req := &control.GetNetmapStatusRequest{ - Body: &control.GetNetmapStatusRequest_Body{}, - } - signRequest(cmd, pk, req) - var epoch uint64 - var status control.NetmapStatus - startTime := time.Now() - cmd.Println("Wait until epoch and netmap status change...") - for { - var resp *control.GetNetmapStatusResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.GetNetmapStatus(cmd.Context(), client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) - - if epoch == 0 { - epoch = resp.GetBody().GetEpoch() - } - - status = resp.GetBody().GetStatus() - if resp.GetBody().GetEpoch() > epoch { - epoch = resp.GetBody().GetEpoch() - cmd.Printf("Epoch changed to %d\n", resp.GetBody().GetEpoch()) - } - - if status == targetStatus { - break - } - - if time.Since(startTime) > maxSetStatusMaxWaitTime { - commonCmd.ExitOnErr(cmd, "failed to wait for netmap status: %w", errNetmapStatusAwaitFailed) - return - } - - time.Sleep(setStatusWaitTimeout) - - cmd.Printf("Current netmap status '%s', target status '%s'\n", status.String(), targetStatus.String()) - } - cmd.Printf("Netmap status changed to '%s' successfully.\n", status.String()) -} diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go deleted file mode 100644 index 3483f5d62..000000000 --- a/cmd/frostfs-cli/modules/control/shards.go +++ /dev/null @@ -1,31 +0,0 @@ -package control - -import ( - "github.com/spf13/cobra" -) - -var shardsCmd = &cobra.Command{ - Use: "shards", - Short: "Operations with storage node's shards", - Long: "Operations with storage node's shards", -} - -func initControlShardsCmd() { - shardsCmd.AddCommand(listShardsCmd) - shardsCmd.AddCommand(setShardModeCmd) - shardsCmd.AddCommand(evacuationShardCmd) - shardsCmd.AddCommand(flushCacheCmd) - shardsCmd.AddCommand(doctorCmd) - shardsCmd.AddCommand(writecacheShardCmd) - shardsCmd.AddCommand(shardsDetachCmd) - shardsCmd.AddCommand(shardsRebuildCmd) - - initControlShardsListCmd() - initControlSetShardModeCmd() - initControlEvacuationShardCmd() - initControlFlushCacheCmd() - initControlDoctorCmd() - initControlShardsWritecacheCmd() - initControlShardsDetachCmd() - initControlShardRebuildCmd() -} diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go deleted file mode 100644 index 40d6628ee..000000000 --- a/cmd/frostfs-cli/modules/control/shards_list.go +++ /dev/null @@ -1,130 +0,0 @@ -package control - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -var listShardsCmd = &cobra.Command{ - Use: "list", - Short: "List shards of the storage node", - Long: "List shards of the storage node", - Run: listShards, -} - -func initControlShardsListCmd() {
 - initControlFlags(listShardsCmd) - - flags := listShardsCmd.Flags() - flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array") -} - -func listShards(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := new(control.ListShardsRequest) - req.SetBody(new(control.ListShardsRequest_Body)) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.ListShardsResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.ListShards(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - shards := resp.GetBody().GetShards() - sortShardsByID(shards) - - isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - if isJSON { - prettyPrintShardsJSON(cmd, shards) - } else { - prettyPrintShards(cmd, shards) - } -} - -func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) { - out := make([]map[string]any, 0, len(ii)) - for _, i := range ii { - out = append(out, map[string]any{ - "shard_id": base58.Encode(i.GetShard_ID()), - "mode": shardModeToString(i.GetMode()), - "metabase": i.GetMetabasePath(), - "blobstor": i.GetBlobstor(), - "writecache": i.GetWritecachePath(), - "pilorama": i.GetPiloramaPath(), - "error_count": i.GetErrorCount(), - "evacuation_in_progress": i.GetEvacuationInProgress(), - }) - } - - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - enc.SetIndent("", " ") - commonCmd.ExitOnErr(cmd, "cannot encode shard info to JSON: %w", enc.Encode(out)) - - cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println -} - -func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) { - for _, i := range ii { - pathPrinter := func(name, path string) string { - if path == "" { - return "" - } - - return fmt.Sprintf("%s: %s\n", name, path) - } - - var sb strings.Builder - sb.WriteString("Blobstor:\n") - for j, info := range i.GetBlobstor() { - sb.WriteString(fmt.Sprintf("\tPath %d: %s\n\tType %d: %s\n", - j, info.GetPath(), j, info.GetType())) - } - - cmd.Printf("Shard %s:\nMode: %s\n"+ - pathPrinter("Metabase", i.GetMetabasePath())+ - sb.String()+ - pathPrinter("Write-cache", i.GetWritecachePath())+ - pathPrinter("Pilorama", i.GetPiloramaPath())+ - fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+ - fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()), - base58.Encode(i.GetShard_ID()), - shardModeToString(i.GetMode()), - ) - } -} - -func shardModeToString(m control.ShardMode) string { - strMode, ok := lookUpShardModeString(m) - if ok { - return strMode - } - - return "unknown" -} - -func sortShardsByID(ii []control.ShardInfo) { - sort.Slice(ii, func(i, j int) bool { - return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0 - }) -} diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go deleted file mode 100644 index 8fe01ba30..000000000 --- a/cmd/frostfs-cli/modules/control/shards_set_mode.go +++ /dev/null @@ -1,182 +0,0 @@ -package control - -import ( - "bytes" - "fmt" - "slices" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -const ( - shardModeFlag = "mode" - shardIDFlag = "id" - shardAllFlag = "all" - shardClearErrorsFlag = "clear-errors" -) - -// mShardModes maps string command input to control.ShardMode. To support a new -// mode, it's enough to add a map entry. Modes are automatically printed in -// command help messages. -var mShardModes = map[string]struct { - val control.ShardMode - - // flag to support shard mode implicitly without help message. The flag is set - // for values which are not expected to be set by users but still supported - // for developers. - unsafe bool -}{ - "read-only": {val: control.ShardMode_READ_ONLY}, - "read-write": {val: control.ShardMode_READ_WRITE}, - "degraded-read-write": {val: control.ShardMode_DEGRADED, unsafe: true}, - "degraded-read-only": {val: control.ShardMode_DEGRADED_READ_ONLY}, -}
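- -// For example (editor's illustration, not from the original file; -// ShardMode_FOO is a placeholder, not a real control.ShardMode value), one -// more mode would take a single entry: -// -// "foo-mode": {val: control.ShardMode_FOO, unsafe: true}, -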
-// iterateSafeShardModes iterates over string representations of safe supported -// shard modes. Safe means modes which are expected to be used by any user. All -// other supported modes are for developers only. -func iterateSafeShardModes(f func(string)) { - for strMode, mode := range mShardModes { - if !mode.unsafe { - f(strMode) - } - } -} - -// lookUpShardModeFromString looks up the supported control.ShardMode represented -// by the given string. Returns false if no corresponding mode exists. -func lookUpShardModeFromString(str string) (control.ShardMode, bool) { - mode, ok := mShardModes[str] - if !ok { - return control.ShardMode_SHARD_MODE_UNDEFINED, false - } - - return mode.val, true -} - -// lookUpShardModeString looks up the string representation of the supported -// shard mode. Returns false if the mode is not supported. -func lookUpShardModeString(m control.ShardMode) (string, bool) { - for strMode, mode := range mShardModes { - if mode.val == m { - return strMode, true - } - } - - return "", false -} - -var setShardModeCmd = &cobra.Command{ - Use: "set-mode", - Short: "Set work mode of the shard", - Long: "Set work mode of the shard", - Run: setShardMode, -} - -func initControlSetShardModeCmd() { - initControlFlags(setShardModeCmd) - - flags := setShardModeCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - flags.Bool(shardAllFlag, false, "Process all shards") - - modes := make([]string, 0) - - iterateSafeShardModes(func(strMode string) { - modes = append(modes, "'"+strMode+"'") - }) - - flags.String(shardModeFlag, "", - fmt.Sprintf("New shard mode (%s)", strings.Join(modes, ", ")), - ) - _ = setShardModeCmd.MarkFlagRequired(shardModeFlag) - flags.Bool(shardClearErrorsFlag, false, "Set shard error count to 0") - - setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} - -func setShardMode(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - strMode, _ := cmd.Flags().GetString(shardModeFlag) - - mode, ok := lookUpShardModeFromString(strMode) - if !ok { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported mode %s", strMode)) - } - - req := new(control.SetShardModeRequest) - - body := new(control.SetShardModeRequest_Body) - req.SetBody(body) - - body.SetMode(mode) - body.SetShard_ID(getShardIDList(cmd)) - - reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag) - body.SetResetErrorCounter(reset) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.SetShardModeResponse - var err error - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.SetShardMode(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard mode update request successfully sent.")
-} - -func getShardIDList(cmd *cobra.Command) [][]byte { - all, _ := cmd.Flags().GetBool(shardAllFlag) - if all { - return nil - } - - return getShardIDListFromIDFlag(cmd, true) -} - -func getShardIDListFromIDFlag(cmd *cobra.Command, withAllFlag bool) [][]byte { - sidList, _ := cmd.Flags().GetStringSlice(shardIDFlag) - if len(sidList) == 0 { - if withAllFlag { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("either --%s or --%s flag must be provided", shardIDFlag, shardAllFlag)) - } else { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("--%s flag value must be provided", shardIDFlag)) - } - } - - // We can sort the ID list and perform this check without additional allocations, - // but preserving the user order is a nice thing to have. - // Also, this is a CLI, we don't care too much about this. - seen := make(map[string]struct{}) - for i := range sidList { - if _, ok := seen[sidList[i]]; ok { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("duplicated shard IDs: %s", sidList[i])) - } - seen[sidList[i]] = struct{}{} - } - - res := make([][]byte, 0, len(sidList)) - for i := range sidList { - raw, err := base58.Decode(sidList[i]) - commonCmd.ExitOnErr(cmd, "incorrect shard ID encoding: %w", err) - - res = append(res, raw) - } - - slices.SortFunc(res, bytes.Compare) - return res -} diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go deleted file mode 100644 index 1e4575f49..000000000 --- a/cmd/frostfs-cli/modules/control/synchronize_tree.go +++ /dev/null @@ -1,78 +0,0 @@ -package control - -import ( - "crypto/sha256" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -const ( - synchronizeTreeIDFlag = "tree-id" - synchronizeTreeHeightFlag = "height" -) - -var synchronizeTreeCmd = &cobra.Command{ - Use: "synchronize-tree", - Short: "Synchronize log for the tree", - Long: "Synchronize log for the tree in an object tree service.", - Run: synchronizeTree, -} - -func initControlSynchronizeTreeCmd() { - initControlFlags(synchronizeTreeCmd) - - flags := synchronizeTreeCmd.Flags() - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String(synchronizeTreeIDFlag, "", "Tree ID") - flags.Uint64(synchronizeTreeHeightFlag, 0, "Starting height") -} - -func synchronizeTree(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - var cnr cid.ID - cidStr, _ := cmd.Flags().GetString(commonflags.CIDFlag) - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(cidStr)) - - treeID, _ := cmd.Flags().GetString("tree-id") - if treeID == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("tree ID must not be empty")) - } - - height, _ := cmd.Flags().GetUint64("height") - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - req := &control.SynchronizeTreeRequest{ - Body: &control.SynchronizeTreeRequest_Body{ - ContainerId: rawCID, - TreeId: treeID, - Height: height, - }, - } - - err := ctrlmessage.Sign(pk, req) - commonCmd.ExitOnErr(cmd, "could not sign request: %w", err) - - cli := getClient(cmd, pk) - 
- var resp *control.SynchronizeTreeResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.SynchronizeTree(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Tree has been synchronized successfully.") -} diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go deleted file mode 100644 index 41d9dbf8a..000000000 --- a/cmd/frostfs-cli/modules/control/util.go +++ /dev/null @@ -1,70 +0,0 @@ -package control - -import ( - "crypto/ecdsa" - "errors" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - "github.com/spf13/cobra" -) - -const ( - irFlagNameVUB = "vub" -) - -func initControlFlags(cmd *cobra.Command) { - ff := cmd.Flags() - ff.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) - ff.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - ff.String(controlRPC, controlRPCDefault, controlRPCUsage) - ff.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage) -} - -func initControlIRFlags(cmd *cobra.Command) { - initControlFlags(cmd) - - ff := cmd.Flags() - ff.Uint32(irFlagNameVUB, 0, "Valid until block value for notary transaction") -} - -func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req ctrlmessage.SignedMessage) { - err := ctrlmessage.Sign(pk, req) - commonCmd.ExitOnErr(cmd, "could not sign request: %w", err) -} - -func verifyResponse(cmd *cobra.Command, - sigControl interface { - GetKey() []byte - GetSign() []byte - }, - body interface { - MarshalProtobuf([]byte) []byte - }, -) { - if sigControl == nil { - commonCmd.ExitOnErr(cmd, "", errors.New("missing response signature")) - } - - // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion - var sigV2 refs.Signature - sigV2.SetScheme(refs.ECDSA_SHA512) - sigV2.SetKey(sigControl.GetKey()) - sigV2.SetSign(sigControl.GetSign()) - - var sig frostfscrypto.Signature - commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2)) - - if !sig.Verify(body.MarshalProtobuf(nil)) { - commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature")) - } -} - -func getClient(cmd *cobra.Command, pk *ecdsa.PrivateKey) *client.Client { - return internalclient.GetSDKClientByFlag(cmd, pk, controlRPC) -} diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go deleted file mode 100644 index d0c9a641b..000000000 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ /dev/null @@ -1,88 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - 
"github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -const ( - asyncFlag = "async" - restoreModeFlag = "restore-mode" - shrinkFlag = "shrink" -) - -var writecacheShardCmd = &cobra.Command{ - Use: "writecache", - Short: "Operations with storage node's write-cache", - Long: "Operations with storage node's write-cache", -} - -var sealWritecacheShardCmd = &cobra.Command{ - Use: "seal", - Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", - Run: sealWritecache, -} - -func sealWritecache(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) - async, _ := cmd.Flags().GetBool(asyncFlag) - restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag) - shrink, _ := cmd.Flags().GetBool(shrinkFlag) - - req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{ - Shard_ID: getShardIDList(cmd), - IgnoreErrors: ignoreErrors, - Async: async, - RestoreMode: restoreMode, - Shrink: shrink, - }} - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.SealWriteCacheResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.SealWriteCache(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - var success, failed uint - for _, res := range resp.GetBody().GetResults() { - if res.GetSuccess() { - success++ - cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID())) - } else { - failed++ - cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError()) - } - } - cmd.Printf("Total: %d success, %d failed\n", success, failed) -} - -func initControlShardsWritecacheCmd() { - writecacheShardCmd.AddCommand(sealWritecacheShardCmd) - - initControlFlags(sealWritecacheShardCmd) - - ff := sealWritecacheShardCmd.Flags() - ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - ff.Bool(shardAllFlag, false, "Process all shards") - ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects") - ff.Bool(asyncFlag, false, "Run operation in background") - ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing") - ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage") - - sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/netmap/get_epoch.go b/cmd/frostfs-cli/modules/netmap/get_epoch.go deleted file mode 100644 index 8e60e69bf..000000000 --- a/cmd/frostfs-cli/modules/netmap/get_epoch.go +++ /dev/null @@ -1,35 +0,0 @@ -package netmap - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -var getEpochCmd = &cobra.Command{ - Use: "epoch", - Short: "Get current epoch number", - Long: "Get current epoch number", - Run: func(cmd *cobra.Command, _ []string) { - p := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC) - - prm := internalclient.NetworkInfoPrm{ - Client: cli, - } 
- - res, err := internalclient.NetworkInfo(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - netInfo := res.NetworkInfo() - - cmd.Println(netInfo.CurrentEpoch()) - }, -} - -func initGetEpochCmd() { - commonflags.Init(getEpochCmd) - commonflags.InitAPI(getEpochCmd) -} diff --git a/cmd/frostfs-cli/modules/netmap/netinfo.go b/cmd/frostfs-cli/modules/netmap/netinfo.go deleted file mode 100644 index 2dbd72355..000000000 --- a/cmd/frostfs-cli/modules/netmap/netinfo.go +++ /dev/null @@ -1,62 +0,0 @@ -package netmap - -import ( - "encoding/hex" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/config/netmode" - "github.com/spf13/cobra" -) - -var netInfoCmd = &cobra.Command{ - Use: "netinfo", - Short: "Get information about FrostFS network", - Long: "Get information about FrostFS network", - Run: func(cmd *cobra.Command, _ []string) { - p := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC) - - prm := internalclient.NetworkInfoPrm{ - Client: cli, - } - - res, err := internalclient.NetworkInfo(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - netInfo := res.NetworkInfo() - - cmd.Printf("Epoch: %d\n", netInfo.CurrentEpoch()) - - magic := netInfo.MagicNumber() - cmd.Printf("Network magic: [%s] %d\n", netmode.Magic(magic), magic) - - cmd.Printf("Time per block: %s\n", time.Duration(netInfo.MsPerBlock())*time.Millisecond) - - const format = " %s: %v\n" - - cmd.Println("FrostFS network configuration (system)") - cmd.Printf(format, "Container fee", netInfo.ContainerFee()) - cmd.Printf(format, "Epoch duration", netInfo.EpochDuration()) - cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee()) - cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize()) - cmd.Printf(format, "Maximum count of data shards", netInfo.MaxECDataCount()) - cmd.Printf(format, "Maximum count of parity shards", netInfo.MaxECParityCount()) - cmd.Printf(format, "Withdrawal fee", netInfo.WithdrawalFee()) - cmd.Printf(format, "Homomorphic hashing disabled", netInfo.HomomorphicHashingDisabled()) - cmd.Printf(format, "Maintenance mode allowed", netInfo.MaintenanceModeAllowed()) - - cmd.Println("FrostFS network configuration (other)") - netInfo.IterateRawNetworkParameters(func(name string, value []byte) { - cmd.Printf(format, name, hex.EncodeToString(value)) - }) - }, -} - -func initNetInfoCmd() { - commonflags.Init(netInfoCmd) - commonflags.InitAPI(netInfoCmd) -} diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go deleted file mode 100644 index 5da66dcd9..000000000 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ /dev/null @@ -1,72 +0,0 @@ -package netmap - -import ( - "encoding/hex" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - 
"github.com/spf13/cobra" -) - -const nodeInfoJSONFlag = commonflags.JSON - -var nodeInfoCmd = &cobra.Command{ - Use: "nodeinfo", - Short: "Get target node info", - Long: `Get target node info`, - Run: func(cmd *cobra.Command, _ []string) { - p := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC) - - prm := internalclient.NodeInfoPrm{ - Client: cli, - } - - res, err := internalclient.NodeInfo(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - prettyPrintNodeInfo(cmd, res.NodeInfo()) - }, -} - -func initNodeInfoCmd() { - commonflags.Init(nodeInfoCmd) - commonflags.InitAPI(nodeInfoCmd) - nodeInfoCmd.Flags().Bool(nodeInfoJSONFlag, false, "Print node info in JSON format") -} - -func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { - isJSON, _ := cmd.Flags().GetBool(nodeInfoJSONFlag) - if isJSON { - common.PrettyPrintJSON(cmd, i, "node info") - return - } - - cmd.Println("key:", hex.EncodeToString(i.PublicKey())) - - var stateWord string - switch i.Status() { - default: - stateWord = "" - case netmap.Online: - stateWord = "online" - case netmap.Offline: - stateWord = "offline" - case netmap.Maintenance: - stateWord = "maintenance" - } - - cmd.Println("state:", stateWord) - - for s := range i.NetworkEndpoints() { - cmd.Println("address:", s) - } - - for key, value := range i.Attributes() { - cmd.Printf("attribute: %s=%s\n", key, value) - } -} diff --git a/cmd/frostfs-cli/modules/netmap/root.go b/cmd/frostfs-cli/modules/netmap/root.go deleted file mode 100644 index b4f5897e5..000000000 --- a/cmd/frostfs-cli/modules/netmap/root.go +++ /dev/null @@ -1,32 +0,0 @@ -package netmap - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "netmap", - Short: "Operations with Network Map", - Long: `Operations with Network Map`, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - // bind exactly that cmd's flags to - // the viper before execution - commonflags.Bind(cmd) - commonflags.BindAPI(cmd) - }, -} - -func init() { - Cmd.AddCommand( - getEpochCmd, - nodeInfoCmd, - netInfoCmd, - snapshotCmd, - ) - - initGetEpochCmd() - initNetInfoCmd() - initNodeInfoCmd() - initSnapshotCmd() -} diff --git a/cmd/frostfs-cli/modules/netmap/snapshot.go b/cmd/frostfs-cli/modules/netmap/snapshot.go deleted file mode 100644 index 650d8a1b9..000000000 --- a/cmd/frostfs-cli/modules/netmap/snapshot.go +++ /dev/null @@ -1,32 +0,0 @@ -package netmap - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -var snapshotCmd = &cobra.Command{ - Use: "snapshot", - Short: "Request current local snapshot of the network map", - Long: `Request current local snapshot of the network map`, - Run: func(cmd *cobra.Command, _ []string) { - p := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC) - - var prm internalclient.NetMapSnapshotPrm - prm.SetClient(cli) - - res, err := internalclient.NetMapSnapshot(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - commonCmd.PrettyPrintNetMap(cmd, res.NetMap(), false) - }, -} - -func initSnapshotCmd() { - commonflags.Init(snapshotCmd) - 
commonflags.InitAPI(snapshotCmd) -} diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go deleted file mode 100644 index 08a9ac4c8..000000000 --- a/cmd/frostfs-cli/modules/object/delete.go +++ /dev/null @@ -1,75 +0,0 @@ -package object - -import ( - "fmt" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var objectDelCmd = &cobra.Command{ - Use: "delete", - Aliases: []string{"del"}, - Short: "Delete object from FrostFS", - Long: "Delete object from FrostFS", - Run: deleteObject, -} - -func initObjectDeleteCmd() { - commonflags.Init(objectDelCmd) - initFlagSession(objectDelCmd, "DELETE") - - flags := objectDelCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - flags.Bool(binaryFlag, false, "Deserialize object structure from given file.") - flags.String(fileFlag, "", "File with object payload") -} - -func deleteObject(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - var objAddr oid.Address - - binary, _ := cmd.Flags().GetBool(binaryFlag) - if binary { - filename, _ := cmd.Flags().GetString(fileFlag) - if filename == "" { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", fileFlag)) - } - objAddr = readObjectAddressBin(cmd, &cnr, &obj, filename) - } else { - cidVal, _ := cmd.Flags().GetString(commonflags.CIDFlag) - if cidVal == "" { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.CIDFlag)) - } - - oidVal, _ := cmd.Flags().GetString(commonflags.OIDFlag) - if oidVal == "" { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) - } - - objAddr = ReadObjectAddress(cmd, &cnr, &obj) - } - - pk := key.GetOrGenerate(cmd) - - var prm internalclient.DeleteObjectPrm - ReadOrOpenSession(cmd, &prm, pk, cnr, &obj) - Prepare(cmd, &prm) - prm.SetAddress(objAddr) - - res, err := internalclient.DeleteObject(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - tomb := res.Tombstone() - - cmd.Println("Object removed successfully.") - cmd.Printf(" ID: %s\n CID: %s\n", tomb, cnr) -} diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go deleted file mode 100644 index 7312f5384..000000000 --- a/cmd/frostfs-cli/modules/object/get.go +++ /dev/null @@ -1,156 +0,0 @@ -package object - -import ( - "bytes" - "fmt" - "io" - "os" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/cheggaaa/pb" - "github.com/spf13/cobra" -) - -var objectGetCmd = &cobra.Command{ - Use: "get", - Short: "Get 
object from FrostFS", - Long: "Get object from FrostFS", - Run: getObject, -} - -func initObjectGetCmd() { - commonflags.Init(objectGetCmd) - initFlagSession(objectGetCmd, "GET") - - flags := objectGetCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectGetCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.String(fileFlag, "", "File to write object payload to(with -b together with signature and header). Default: stdout.") - flags.Bool(rawFlag, false, rawFlagDesc) - flags.Bool(noProgressFlag, false, "Do not show progress bar") - flags.Bool(binaryFlag, false, "Serialize whole object structure into given file(id + signature + header + payload).") -} - -func getObject(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - objAddr := ReadObjectAddress(cmd, &cnr, &obj) - - filename := cmd.Flag(fileFlag).Value.String() - out, closer := createOutWriter(cmd, filename) - defer closer() - - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.GetObjectPrm - prm.SetClient(cli) - Prepare(cmd, &prm) - readSession(cmd, &prm, pk, cnr, obj) - - raw, _ := cmd.Flags().GetBool(rawFlag) - prm.SetRawFlag(raw) - prm.SetAddress(objAddr) - - var p *pb.ProgressBar - noProgress, _ := cmd.Flags().GetBool(noProgressFlag) - - var payloadWriter io.Writer - var payloadBuffer *bytes.Buffer - binary, _ := cmd.Flags().GetBool(binaryFlag) - if binary { - payloadBuffer = new(bytes.Buffer) - payloadWriter = payloadBuffer - } else { - payloadWriter = out - } - - if filename == "" || noProgress { - prm.SetPayloadWriter(payloadWriter) - } else { - p = pb.New64(0) - p.Output = cmd.OutOrStdout() - prm.SetPayloadWriter(p.NewProxyWriter(payloadWriter)) - prm.SetHeaderCallback(func(o *objectSDK.Object) { - p.SetTotal64(int64(o.PayloadSize())) - p.Start() - }) - } - - res, err := internalclient.GetObject(cmd.Context(), prm) - if p != nil { - p.Finish() - } - if err != nil { - if ok := printSplitInfoErr(cmd, err); ok { - return - } - - if ok := printECInfoErr(cmd, err); ok { - return - } - - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - } - - processResult(cmd, res, binary, payloadBuffer, out, filename) -} - -func processResult(cmd *cobra.Command, res *internalclient.GetObjectRes, binary bool, payloadBuffer *bytes.Buffer, out io.Writer, filename string) { - if binary { - objToStore := res.Header() - // TODO(@acid-ant): #1932 Use streams to marshal/unmarshal payload - objToStore.SetPayload(payloadBuffer.Bytes()) - objBytes, err := objToStore.Marshal() - commonCmd.ExitOnErr(cmd, "", err) - _, err = out.Write(objBytes) - commonCmd.ExitOnErr(cmd, "unable to write binary object in out: %w ", err) - } - - if filename != "" && !strictOutput(cmd) { - cmd.Printf("[%s] Object successfully saved\n", filename) - } - - // Print header only if file is not streamed to stdout. 
- if filename != "" { - err := printHeader(cmd, res.Header()) - commonCmd.ExitOnErr(cmd, "", err) - } -} - -func createOutWriter(cmd *cobra.Command, filename string) (out io.Writer, closer func()) { - if filename == "" { - out = os.Stdout - closer = func() {} - } else { - f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) - if err != nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err)) - } - - out = f - closer = func() { - f.Close() - } - } - return -} - -func strictOutput(cmd *cobra.Command) bool { - toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - toProto, _ := cmd.Flags().GetBool("proto") - return toJSON || toProto -} diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go deleted file mode 100644 index 25df375d4..000000000 --- a/cmd/frostfs-cli/modules/object/hash.go +++ /dev/null @@ -1,101 +0,0 @@ -package object - -import ( - "encoding/hex" - "fmt" - "strings" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -const getRangeHashSaltFlag = "salt" - -const ( - hashSha256 = "sha256" - hashTz = "tz" - rangeSep = ":" -) - -var objectHashCmd = &cobra.Command{ - Use: "hash", - Short: "Get object hash", - Long: "Get object hash", - Run: getObjectHash, -} - -func initObjectHashCmd() { - commonflags.Init(objectHashCmd) - initFlagSession(objectHashCmd, "RANGEHASH") - - flags := objectHashCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectHashCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") - _ = objectHashCmd.MarkFlagRequired("range") - - flags.String("type", hashSha256, "Hash type. 
Either 'sha256' or 'tz'") - flags.String(getRangeHashSaltFlag, "", "Salt in hex format") -} - -func getObjectHash(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - objAddr := ReadObjectAddress(cmd, &cnr, &obj) - - ranges, err := getRangeList(cmd) - commonCmd.ExitOnErr(cmd, "", err) - typ, err := getHashType(cmd) - commonCmd.ExitOnErr(cmd, "", err) - - strSalt := strings.TrimPrefix(cmd.Flag(getRangeHashSaltFlag).Value.String(), "0x") - - salt, err := hex.DecodeString(strSalt) - commonCmd.ExitOnErr(cmd, "could not decode salt: %w", err) - - pk := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var hashPrm internalclient.HashPayloadRangesPrm - hashPrm.SetClient(cli) - Prepare(cmd, &hashPrm) - readSession(cmd, &hashPrm, pk, cnr, obj) - hashPrm.SetAddress(objAddr) - hashPrm.SetSalt(salt) - hashPrm.SetRanges(ranges) - - if typ == hashTz { - hashPrm.TZ() - } - - res, err := internalclient.HashPayloadRanges(cmd.Context(), hashPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - hs := res.HashList() - - for i := range hs { - cmd.Printf("Offset=%d (Length=%d)\t: %s\n", ranges[i].GetOffset(), ranges[i].GetLength(), - hex.EncodeToString(hs[i])) - } -} - -func getHashType(cmd *cobra.Command) (string, error) { - rawType := cmd.Flag("type").Value.String() - switch typ := strings.ToLower(rawType); typ { - case hashSha256, hashTz: - return typ, nil - default: - return "", fmt.Errorf("invalid hash type: %s", typ) - } -} diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go deleted file mode 100644 index 97e996cad..000000000 --- a/cmd/frostfs-cli/modules/object/head.go +++ /dev/null @@ -1,212 +0,0 @@ -package object - -import ( - "encoding/hex" - "errors" - "fmt" - "os" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var objectHeadCmd = &cobra.Command{ - Use: "head", - Short: "Get object header", - Long: "Get object header", - Run: getObjectHeader, -} - -func initObjectHeadCmd() { - commonflags.Init(objectHeadCmd) - initFlagSession(objectHeadCmd, "HEAD") - - flags := objectHeadCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectHeadCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.String(fileFlag, "", "File to write header to. 
Default: stdout.") - flags.Bool(commonflags.JSON, false, "Marshal output in JSON") - flags.Bool("proto", false, "Marshal output in Protobuf") - flags.Bool(rawFlag, false, rawFlagDesc) -} - -func getObjectHeader(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - objAddr := ReadObjectAddress(cmd, &cnr, &obj) - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.HeadObjectPrm - prm.SetClient(cli) - Prepare(cmd, &prm) - readSession(cmd, &prm, pk, cnr, obj) - - raw, _ := cmd.Flags().GetBool(rawFlag) - prm.SetRawFlag(raw) - prm.SetAddress(objAddr) - - res, err := internalclient.HeadObject(cmd.Context(), prm) - if err != nil { - if ok := printSplitInfoErr(cmd, err); ok { - return - } - - if ok := printECInfoErr(cmd, err); ok { - return - } - - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - } - - err = saveAndPrintHeader(cmd, res.Header(), cmd.Flag(fileFlag).Value.String()) - commonCmd.ExitOnErr(cmd, "", err) -} - -func saveAndPrintHeader(cmd *cobra.Command, obj *objectSDK.Object, filename string) error { - bs, err := marshalHeader(cmd, obj) - if err != nil { - return fmt.Errorf("could not marshal header: %w", err) - } - if len(bs) != 0 { - if filename == "" { - cmd.Println(string(bs)) - return nil - } - err = os.WriteFile(filename, bs, os.ModePerm) - if err != nil { - return fmt.Errorf("could not write header to file: %w", err) - } - cmd.Printf("[%s] Header successfully saved.", filename) - } - - return printHeader(cmd, obj) -} - -func marshalHeader(cmd *cobra.Command, hdr *objectSDK.Object) ([]byte, error) { - toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - toProto, _ := cmd.Flags().GetBool("proto") - switch { - case toJSON && toProto: - return nil, errors.New("'--json' and '--proto' flags are mutually exclusive") - case toJSON: - return hdr.MarshalJSON() - case toProto: - return hdr.Marshal() - default: - return nil, nil - } -} - -func printObjectID(cmd *cobra.Command, recv func() (oid.ID, bool)) { - var strID string - - id, ok := recv() - if ok { - strID = id.String() - } else { - strID = "" - } - - cmd.Printf("ID: %s\n", strID) -} - -func printContainerID(cmd *cobra.Command, recv func() (cid.ID, bool)) { - var strID string - - id, ok := recv() - if ok { - strID = id.String() - } else { - strID = "" - } - - cmd.Printf("CID: %s\n", strID) -} - -func printHeader(cmd *cobra.Command, obj *objectSDK.Object) error { - printObjectID(cmd, obj.ID) - printContainerID(cmd, obj.ContainerID) - cmd.Printf("Owner: %s\n", obj.OwnerID()) - cmd.Printf("CreatedAt: %d\n", obj.CreationEpoch()) - cmd.Printf("Size: %d\n", obj.PayloadSize()) - common.PrintChecksum(cmd, "HomoHash", obj.PayloadHomomorphicHash) - common.PrintChecksum(cmd, "Checksum", obj.PayloadChecksum) - cmd.Printf("Type: %s\n", obj.Type()) - - cmd.Println("Attributes:") - for _, attr := range obj.Attributes() { - if attr.Key() == objectSDK.AttributeTimestamp { - cmd.Printf(" %s=%s (%s)\n", - attr.Key(), - attr.Value(), - common.PrettyPrintUnixTime(attr.Value())) - continue - } - cmd.Printf(" %s=%s\n", attr.Key(), attr.Value()) - } - - if signature := obj.Signature(); signature != nil { - cmd.Print("ID signature:\n") - - // TODO(@carpawell): #468 implement and use another approach to avoid conversion - var sigV2 refs.Signature - signature.WriteToV2(&sigV2) - - cmd.Printf(" public key: %s\n", hex.EncodeToString(sigV2.GetKey())) - cmd.Printf(" signature: %s\n", hex.EncodeToString(sigV2.GetSign())) - } - - if ecHeader := obj.ECHeader(); ecHeader != nil 
{ - cmd.Print("EC header:\n") - - cmd.Printf(" parent object ID: %s\n", ecHeader.Parent().EncodeToString()) - cmd.Printf(" index: %d\n", ecHeader.Index()) - cmd.Printf(" total: %d\n", ecHeader.Total()) - cmd.Printf(" header length: %d\n", ecHeader.HeaderLength()) - } - - return printSplitHeader(cmd, obj) -} - -func printSplitHeader(cmd *cobra.Command, obj *objectSDK.Object) error { - if splitID := obj.SplitID(); splitID != nil { - cmd.Printf("Split ID: %s\n", splitID) - } - - if oid, ok := obj.ParentID(); ok { - cmd.Printf("Split ParentID: %s\n", oid) - } - - if prev, ok := obj.PreviousID(); ok { - cmd.Printf("Split PreviousID: %s\n", prev) - } - - for _, child := range obj.Children() { - cmd.Printf("Split ChildID: %s\n", child.String()) - } - - parent := obj.Parent() - if parent != nil { - cmd.Print("\nSplit Parent Header:\n") - - return printHeader(cmd, parent) - } - - return nil -} diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go deleted file mode 100644 index d67db9f0d..000000000 --- a/cmd/frostfs-cli/modules/object/lock.go +++ /dev/null @@ -1,132 +0,0 @@ -package object - -import ( - "context" - "errors" - "fmt" - "strconv" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// object lock command. 
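The lock command declared next accepts either an absolute --expire-at epoch or a relative --lifetime (the two flags are mutually exclusive). When a lifetime is given, the command fetches the current epoch over RPC and adds the lifetime to it; a worked sketch of just that conversion, as a hypothetical pure helper:

// expirationEpoch: with currEpoch = 100 and --lifetime 5, the lock stays
// active through epoch 105.
func expirationEpoch(currEpoch, expireAt, lifetime uint64) uint64 {
	if lifetime != 0 {
		return currEpoch + lifetime
	}
	return expireAt // exactly one of the two flags is set (cobra enforces it)
}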
-var objectLockCmd = &cobra.Command{ - Use: "lock", - Short: "Lock object in container", - Long: "Lock object in container", - Run: func(cmd *cobra.Command, _ []string) { - cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidRaw) - commonCmd.ExitOnErr(cmd, "Incorrect container arg: %v", err) - - key := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - - oidsRaw, _ := cmd.Flags().GetStringSlice(commonflags.OIDFlag) - - lockList := make([]oid.ID, 0, len(oidsRaw)) - oidM := make(map[oid.ID]struct{}) - - for i, oidRaw := range oidsRaw { - var oID oid.ID - err = oID.DecodeString(oidRaw) - commonCmd.ExitOnErr(cmd, fmt.Sprintf("Incorrect object arg #%d: %%v", i+1), err) - - if _, ok := oidM[oID]; ok { - continue - } - - lockList = append(lockList, oID) - oidM[oID] = struct{}{} - - for _, relative := range collectObjectRelatives(cmd, cli, cnr, oID) { - if _, ok := oidM[relative]; ok { - continue - } - - lockList = append(lockList, relative) - oidM[relative] = struct{}{} - } - } - - var idOwner user.ID - user.IDFromKey(&idOwner, key.PublicKey) - - var lock objectSDK.Lock - lock.WriteMembers(lockList) - - exp, _ := cmd.Flags().GetUint64(commonflags.ExpireAt) - lifetime, _ := cmd.Flags().GetUint64(commonflags.Lifetime) - if exp == 0 && lifetime == 0 { // mutual exclusion is ensured by cobra - commonCmd.ExitOnErr(cmd, "", errors.New("either expiration epoch of a lifetime is required")) - } - - if lifetime != 0 { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() - - endpoint := viper.GetString(commonflags.RPC) - - currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) - commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) - - exp = currEpoch + lifetime - } - - common.PrintVerbose(cmd, "Lock object will expire after %d epoch", exp) - - var expirationAttr objectSDK.Attribute - expirationAttr.SetKey(objectV2.SysAttributeExpEpoch) - expirationAttr.SetValue(strconv.FormatUint(exp, 10)) - - obj := objectSDK.New() - obj.SetContainerID(cnr) - obj.SetOwnerID(idOwner) - obj.SetType(objectSDK.TypeLock) - obj.SetAttributes(expirationAttr) - obj.SetPayload(lock.Marshal()) - - var prm internalclient.PutObjectPrm - ReadOrOpenSessionViaClient(cmd, &prm, cli, key, cnr, nil) - Prepare(cmd, &prm) - prm.SetHeader(obj) - - res, err := internalclient.PutObject(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "Store lock object in FrostFS: %w", err) - - cmd.Printf("Lock object ID: %s\n", res.ID()) - cmd.Println("Objects successfully locked.") - }, -} - -func initCommandObjectLock() { - commonflags.Init(objectLockCmd) - initFlagSession(objectLockCmd, "PUT") - - ff := objectLockCmd.Flags() - - ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectLockCmd.MarkFlagRequired(commonflags.CIDFlag) - - ff.StringSlice(commonflags.OIDFlag, nil, commonflags.OIDFlagUsage) - _ = objectLockCmd.MarkFlagRequired(commonflags.OIDFlag) - - ff.Uint64P(commonflags.ExpireAt, "e", 0, "The last active epoch for the lock") - - ff.Uint64(commonflags.Lifetime, 0, "Lock lifetime") - objectLockCmd.MarkFlagsMutuallyExclusive(commonflags.ExpireAt, commonflags.Lifetime) -} diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go deleted file mode 100644 index 476238651..000000000 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ /dev/null @@ -1,602 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - "encoding/hex" - "encoding/json" 
- "errors" - "fmt" - "slices" - "sync" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" - "golang.org/x/sync/errgroup" -) - -const ( - verifyPresenceAllFlag = "verify-presence-all" - preferInternalAddressesFlag = "prefer-internal-addresses" -) - -var ( - errNoAvailableEndpoint = errors.New("failed to create client: no available endpoint") - errMalformedComplexObject = errors.New("object consists of EC and non EC parts") -) - -type phyObject struct { - containerID cid.ID - objectID oid.ID - storedOnAllContainerNodes bool - ecHeader *ecHeader -} - -type ecHeader struct { - index uint32 - parent oid.ID -} - -type objectCounter struct { - sync.Mutex - total uint32 - isECcounted bool -} - -type objectPlacement struct { - requiredNodes []netmapSDK.NodeInfo - confirmedNodes []netmapSDK.NodeInfo -} - -type objectNodesResult struct { - errors []error - placements map[oid.ID]objectPlacement - total uint32 -} - -type ObjNodesDataObject struct { - ObjectID string `json:"object_id"` - RequiredNodes []string `json:"required_nodes,omitempty"` - ConfirmedNodes []string `json:"confirmed_nodes,omitempty"` - ECParentObjectID *string `json:"ec_parent_object_id,omitempty"` - ECIndex *uint32 `json:"ec_index,omitempty"` -} - -type objNodesResultJSON struct { - ObjectID string `json:"object_id"` - DataObjects []ObjNodesDataObject `json:"data_objects,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -var objectNodesCmd = &cobra.Command{ - Use: "nodes", - Short: "List of nodes where the object is stored", - Long: `List of nodes where the object should be stored and where it is actually stored. - Lock objects must exist on all nodes of the container. - For complex and EC objects, a node is considered to store an object if the node stores at least one part of the complex object or one chunk of the EC object. - By default, the actual storage of the object is checked only on the nodes that should store the object. 
To check all nodes, use the flag --verify-presence-all.`, - Run: objectNodes, -} - -func initObjectNodesCmd() { - commonflags.Init(objectNodesCmd) - - flags := objectNodesCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectGetCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.") - flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.") - flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.") -} - -func objectNodes(cmd *cobra.Command, _ []string) { - var cnrID cid.ID - var objID oid.ID - ReadObjectAddress(cmd, &cnrID, &objID) - - pk := key.GetOrGenerate(cmd) - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) - - placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) - - result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - - getActualPlacement(cmd, netmap, pk, objects, count, result) - - printPlacement(cmd, objID, objects, result) -} - -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { - var addrObj oid.Address - addrObj.SetContainer(cnrID) - addrObj.SetObject(objID) - - var prmHead internalclient.HeadObjectPrm - prmHead.SetClient(cli) - prmHead.SetAddress(addrObj) - prmHead.SetRawFlag(true) - - Prepare(cmd, &prmHead) - readSession(cmd, &prmHead, pk, cnrID, objID) - - res, err := internalclient.HeadObject(cmd.Context(), prmHead) - if err == nil { - obj := phyObject{ - containerID: cnrID, - objectID: objID, - storedOnAllContainerNodes: res.Header().Type() == objectSDK.TypeLock || - res.Header().Type() == objectSDK.TypeTombstone || - len(res.Header().Children()) > 0, - } - if res.Header().ECHeader() != nil { - obj.ecHeader = &ecHeader{ - index: res.Header().ECHeader().Index(), - parent: res.Header().ECHeader().Parent(), - } - } - return []phyObject{obj}, 1 - } - - var errSplitInfo *objectSDK.SplitInfoError - if errors.As(err, &errSplitInfo) { - return getComplexObjectParts(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - } - - var ecInfoError *objectSDK.ECInfoError - if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 - } - commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil, 0 -} - -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { - members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total -} - -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { - var total int - splitInfo := errSplitInfo.SplitInfo() - - if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - if total = len(members); total > 0 { - total-- // linking object is not data object - } - return members, total - } - - if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return 
members, len(members) - } - - members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) - return members, len(members) -} - -func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { - result := make([]phyObject, 0, len(members)) - var hasNonEC, hasEC bool - var resultGuard sync.Mutex - - if len(members) == 0 { - return result - } - - prmHead.SetRawFlag(true) // to get an error instead of whole object - - eg, egCtx := errgroup.WithContext(cmd.Context()) - for idx := range members { - partObjID := members[idx] - - eg.Go(func() error { - partHeadPrm := prmHead - var partAddr oid.Address - partAddr.SetContainer(cnrID) - partAddr.SetObject(partObjID) - partHeadPrm.SetAddress(partAddr) - - obj, err := internalclient.HeadObject(egCtx, partHeadPrm) - if err != nil { - var ecInfoError *objectSDK.ECInfoError - if errors.As(err, &ecInfoError) { - resultGuard.Lock() - defer resultGuard.Unlock() - result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...) - hasEC = true - return nil - } - return err - } - - if obj.Header().Type() != objectSDK.TypeRegular { - commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type())) - } - - if len(obj.Header().Children()) > 0 { - // linking object is not data object, so skip it - return nil - } - - resultGuard.Lock() - defer resultGuard.Unlock() - result = append(result, phyObject{ - containerID: cnrID, - objectID: partObjID, - }) - hasNonEC = true - - return nil - }) - } - - commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait()) - if hasEC && hasNonEC { - commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject) - } - return result -} - -func getECObjectChunks(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, errECInfo *objectSDK.ECInfoError) []phyObject { - ecInfo := errECInfo.ECInfo() - result := make([]phyObject, 0, len(ecInfo.Chunks)) - for _, ch := range ecInfo.Chunks { - var chID oid.ID - err := chID.ReadFromV2(ch.ID) - if err != nil { - commonCmd.ExitOnErr(cmd, "failed to read EC chunk ID %w", err) - return nil - } - result = append(result, phyObject{ - containerID: cnrID, - objectID: chID, - ecHeader: &ecHeader{ - index: ch.Index, - parent: objID, - }, - }) - } - return result -} - -func getPlacementPolicyAndNetmap(cmd *cobra.Command, cnrID cid.ID, cli *client.Client) (placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) { - eg, egCtx := errgroup.WithContext(cmd.Context()) - eg.Go(func() (e error) { - placementPolicy, e = getPlacementPolicy(egCtx, cnrID, cli) - return - }) - eg.Go(func() (e error) { - netmap, e = getNetMap(egCtx, cli) - return - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", eg.Wait()) - return -} - -func getPlacementPolicy(ctx context.Context, cnrID cid.ID, cli *client.Client) (netmapSDK.PlacementPolicy, error) { - prm := internalclient.GetContainerPrm{ - Client: cli, - ClientParams: client.PrmContainerGet{ - ContainerID: &cnrID, - }, - } - - res, err := internalclient.GetContainer(ctx, prm) - if err != nil { - return netmapSDK.PlacementPolicy{}, err - } - - return res.Container().PlacementPolicy(), nil -} - -func getNetMap(ctx context.Context, cli *client.Client) (*netmapSDK.NetMap, error) { - var prm internalclient.NetMapSnapshotPrm - prm.SetClient(cli) - - res, err := 
internalclient.NetMapSnapshot(ctx, prm) - if err != nil { - return nil, err - } - nm := res.NetMap() - return &nm, nil -} - -func getRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult { - if policy.IsECPlacement(placementPolicy) { - return getECRequiredPlacement(cmd, objects, placementPolicy, netmap) - } - return getReplicaRequiredPlacement(cmd, objects, placementPolicy, netmap) -} - -func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult { - result := &objectNodesResult{ - placements: make(map[oid.ID]objectPlacement), - } - placementBuilder := placement.NewNetworkMapBuilder(netmap) - for _, object := range objects { - placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) - commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) - for repIdx, rep := range placement { - numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() - var nodeIdx uint32 - for _, n := range rep { - if !object.storedOnAllContainerNodes && nodeIdx == numOfReplicas { - break - } - - op := result.placements[object.objectID] - op.requiredNodes = append(op.requiredNodes, n) - result.placements[object.objectID] = op - - nodeIdx++ - } - } - } - - return result -} - -func getECRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult { - result := &objectNodesResult{ - placements: make(map[oid.ID]objectPlacement), - } - for _, object := range objects { - getECRequiredPlacementInternal(cmd, object, placementPolicy, netmap, result) - } - return result -} - -func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap, result *objectNodesResult) { - placementObjectID := object.objectID - if object.ecHeader != nil { - placementObjectID = object.ecHeader.parent - } - placementBuilder := placement.NewNetworkMapBuilder(netmap) - placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) - commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) - - for _, vector := range placement { - if object.storedOnAllContainerNodes { - for _, node := range vector { - op := result.placements[object.objectID] - op.requiredNodes = append(op.requiredNodes, node) - result.placements[object.objectID] = op - } - continue - } - - if object.ecHeader != nil { - chunkIdx := int(object.ecHeader.index) - nodeIdx := chunkIdx % len(vector) - node := vector[nodeIdx] - - op := result.placements[object.objectID] - op.requiredNodes = append(op.requiredNodes, node) - result.placements[object.objectID] = op - } - } -} - -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { - resultMtx := &sync.Mutex{} - counter := &objectCounter{ - total: uint32(count), - } - - candidates := getNodesToCheckObjectExistance(cmd, netmap, result) - - eg, egCtx := errgroup.WithContext(cmd.Context()) - for _, cand := range candidates { - eg.Go(func() error { - cli, err := createClient(egCtx, cmd, cand, pk) - if err != nil { - resultMtx.Lock() - defer resultMtx.Unlock() - result.errors = append(result.errors, fmt.Errorf("failed to connect 
to node %s: %w", hex.EncodeToString(cand.PublicKey()), err)) - return nil - } - - for _, object := range objects { - eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) - resultMtx.Lock() - defer resultMtx.Unlock() - if err == nil && stored { - op := result.placements[object.objectID] - op.confirmedNodes = append(op.confirmedNodes, cand) - result.placements[object.objectID] = op - } - if err != nil { - result.errors = append(result.errors, fmt.Errorf("failed to check object %s existence on node %s: %w", object.objectID.EncodeToString(), hex.EncodeToString(cand.PublicKey()), err)) - } - return nil - }) - } - return nil - }) - } - - commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) - result.total = counter.total -} - -func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { - checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag) - if checkAllNodes { - return netmap.Nodes() - } - var nodes []netmapSDK.NodeInfo - visited := make(map[uint64]struct{}) - for _, p := range result.placements { - for _, node := range p.requiredNodes { - if _, ok := visited[node.Hash()]; !ok { - nodes = append(nodes, node) - visited[node.Hash()] = struct{}{} - } - } - } - return nodes -} - -func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) { - var cli *client.Client - var addresses []string - if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) - addresses = append(addresses, candidate.ExternalAddresses()...) - } else { - addresses = append(addresses, candidate.ExternalAddresses()...) 
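Stepping back to the EC placement rule in getECRequiredPlacementInternal above: a chunk's required node is its EC index taken modulo the placement vector length. A hedged one-line sketch, assuming a non-empty vector and using the file's netmapSDK alias:

// ecChunkNode: with 4 nodes, chunk indices 0,1,2,3,4 map to nodes 0,1,2,3,0.
func ecChunkNode(vector []netmapSDK.NodeInfo, chunkIdx int) netmapSDK.NodeInfo {
	return vector[chunkIdx%len(vector)]
}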
- addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) - } - - var lastErr error - for _, address := range addresses { - var networkAddr network.Address - lastErr = networkAddr.FromString(address) - if lastErr != nil { - continue - } - cli, lastErr = internalclient.GetSDKClient(ctx, cmd, pk, networkAddr) - if lastErr == nil { - break - } - } - if lastErr != nil { - return nil, lastErr - } - if cli == nil { - return nil, errNoAvailableEndpoint - } - return cli, nil -} - -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { - var addrObj oid.Address - addrObj.SetContainer(cnrID) - addrObj.SetObject(objID) - - var prmHead internalclient.HeadObjectPrm - prmHead.SetClient(cli) - prmHead.SetAddress(addrObj) - - Prepare(cmd, &prmHead) - prmHead.SetTTL(1) - readSession(cmd, &prmHead, pk, cnrID, objID) - - res, err := internalclient.HeadObject(ctx, prmHead) - if err == nil && res != nil { - if res.Header().ECHeader() != nil { - counter.Lock() - defer counter.Unlock() - if !counter.isECcounted { - counter.total *= res.Header().ECHeader().Total() - } - counter.isECcounted = true - } - return true, nil - } - var notFound *apistatus.ObjectNotFound - var removed *apistatus.ObjectAlreadyRemoved - if errors.As(err, ¬Found) || errors.As(err, &removed) { - return false, nil - } - return false, err -} - -func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - if json, _ := cmd.Flags().GetBool(commonflags.JSON); json { - printObjectNodesAsJSON(cmd, objID, objects, result) - } else { - printObjectNodesAsText(cmd, objID, objects, result) - } -} - -func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) - fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) - - for _, object := range objects { - fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) - if object.ecHeader != nil { - fmt.Fprintf(cmd.OutOrStdout(), "\tEC index: %d\n", object.ecHeader.index) - fmt.Fprintf(cmd.OutOrStdout(), "\tEC parent: %s\n", object.ecHeader.parent.EncodeToString()) - } - op, ok := result.placements[object.objectID] - if !ok { - continue - } - if len(op.requiredNodes) > 0 { - fmt.Fprintf(cmd.OutOrStdout(), "\tRequired nodes:\n") - for _, node := range op.requiredNodes { - fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey())) - } - } - if len(op.confirmedNodes) > 0 { - fmt.Fprintf(cmd.OutOrStdout(), "\tConfirmed nodes:\n") - for _, node := range op.confirmedNodes { - fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey())) - } - } - } - - if len(result.errors) == 0 { - return - } - fmt.Fprintf(cmd.OutOrStdout(), "Errors:\n") - for _, err := range result.errors { - fmt.Fprintf(cmd.OutOrStdout(), "\t%s\n", err.Error()) - } -} - -func printObjectNodesAsJSON(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - jsonResult := &objNodesResultJSON{ - ObjectID: objID.EncodeToString(), - } - - for _, object := range objects { - do := ObjNodesDataObject{ - ObjectID: object.objectID.EncodeToString(), - } - if object.ecHeader != nil { - do.ECIndex = &object.ecHeader.index - ecParent := object.ecHeader.parent.EncodeToString() - do.ECParentObjectID = &ecParent - } - op, ok := 
result.placements[object.objectID] - if !ok { - continue - } - for _, rn := range op.requiredNodes { - do.RequiredNodes = append(do.RequiredNodes, hex.EncodeToString(rn.PublicKey())) - } - for _, cn := range op.confirmedNodes { - do.ConfirmedNodes = append(do.ConfirmedNodes, hex.EncodeToString(cn.PublicKey())) - } - jsonResult.DataObjects = append(jsonResult.DataObjects, do) - } - for _, err := range result.errors { - jsonResult.Errors = append(jsonResult.Errors, err.Error()) - } - b, err := json.Marshal(jsonResult) - commonCmd.ExitOnErr(cmd, "failed to marshal json: %w", err) - cmd.Println(string(b)) -} diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go deleted file mode 100644 index ebbde76a2..000000000 --- a/cmd/frostfs-cli/modules/object/patch.go +++ /dev/null @@ -1,174 +0,0 @@ -package object - -import ( - "fmt" - "os" - "strconv" - "strings" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -const ( - newAttrsFlagName = "new-attrs" - replaceAttrsFlagName = "replace-attrs" - rangeFlagName = "range" - payloadFlagName = "payload" - splitHeaderFlagName = "split-header" -) - -var objectPatchCmd = &cobra.Command{ - Use: "patch", - Run: patch, - Short: "Patch FrostFS object", - Long: "Patch FrostFS object. Each range passed to the command requires to pass a corresponding patch payload.", - Example: ` -frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --new-attrs 'key1=val1,key2=val2' --replace-attrs -frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY -frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload -`, -} - -func initObjectPatchCmd() { - commonflags.Init(objectPatchCmd) - initFlagSession(objectPatchCmd, "PATCH") - - flags := objectPatchCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectRangeCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2") - flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") - flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. 
Format: offset:length") - flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") - flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") -} - -func patch(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - objAddr := ReadObjectAddress(cmd, &cnr, &obj) - - ranges, err := getRangeSlice(cmd) - commonCmd.ExitOnErr(cmd, "", err) - - payloads := patchPayloadPaths(cmd) - - if len(ranges) != len(payloads) { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads))) - } - - newAttrs, err := parseNewObjectAttrs(cmd) - commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err) - replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName) - - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.PatchObjectPrm - prm.SetClient(cli) - Prepare(cmd, &prm) - ReadOrOpenSession(cmd, &prm, pk, cnr, nil) - - prm.SetAddress(objAddr) - prm.NewAttributes = newAttrs - prm.ReplaceAttribute = replaceAttrs - - prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) - - for i := range ranges { - prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ - Range: ranges[i], - PayloadPath: payloads[i], - }) - } - - res, err := internalclient.Patch(cmd.Context(), prm) - if err != nil { - commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err) - } - cmd.Println("Patched object ID: ", res.OID.EncodeToString()) -} - -func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName) - if err != nil { - return nil, err - } - - attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes - for i := range rawAttrs { - k, v, found := strings.Cut(rawAttrs[i], "=") - if !found { - return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i]) - } - attrs[i].SetKey(k) - attrs[i].SetValue(v) - } - return attrs, nil -} - -func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) { - v, _ := cmd.Flags().GetStringSlice(rangeFlagName) - if len(v) == 0 { - return []objectSDK.Range{}, nil - } - rs := make([]objectSDK.Range, len(v)) - for i := range v { - before, after, found := strings.Cut(v[i], rangeSep) - if !found { - return nil, fmt.Errorf("invalid range specifier: %s", v[i]) - } - - offset, err := strconv.ParseUint(before, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err) - } - length, err := strconv.ParseUint(after, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err) - } - - rs[i].SetOffset(offset) - rs[i].SetLength(length) - } - return rs, nil -} - -func patchPayloadPaths(cmd *cobra.Command) []string { - v, _ := cmd.Flags().GetStringSlice(payloadFlagName) - return v -} - -func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { - path, _ := cmd.Flags().GetString(splitHeaderFlagName) - if path == "" { - return nil - } - - data, err := os.ReadFile(path) - commonCmd.ExitOnErr(cmd, "read file error: %w", err) - - splitHdrV2 := new(objectV2.SplitHeader) - err = splitHdrV2.Unmarshal(data) - if err != nil { - err = splitHdrV2.UnmarshalJSON(data) - commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) - } - - return objectSDK.NewSplitHeaderFromV2(splitHdrV2) -} diff --git a/cmd/frostfs-cli/modules/object/put.go 
b/cmd/frostfs-cli/modules/object/put.go deleted file mode 100644 index 9e8a7cc6f..000000000 --- a/cmd/frostfs-cli/modules/object/put.go +++ /dev/null @@ -1,286 +0,0 @@ -package object - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/cheggaaa/pb" - "github.com/spf13/cobra" -) - -const ( - noProgressFlag = "no-progress" - notificationFlag = "notify" - copiesNumberFlag = "copies-number" - prepareLocallyFlag = "prepare-locally" -) - -var putExpiredOn uint64 - -var objectPutCmd = &cobra.Command{ - Use: "put", - Short: "Put object to FrostFS", - Long: "Put object to FrostFS", - Run: putObject, -} - -func initObjectPutCmd() { - commonflags.Init(objectPutCmd) - initFlagSession(objectPutCmd, "PUT") - - flags := objectPutCmd.Flags() - - flags.String(fileFlag, "", "File with object payload") - _ = objectPutCmd.MarkFlagFilename(fileFlag) - _ = objectPutCmd.MarkFlagRequired(fileFlag) - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - - flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2") - flags.Bool("disable-filename", false, "Do not set well-known filename attribute") - flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute") - flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object") - flags.Bool(noProgressFlag, false, "Do not show progress bar") - flags.Bool(prepareLocallyFlag, false, "Generate object header on the client side (for big object - split locally too)") - - flags.String(notificationFlag, "", "Object notification in the form of *epoch*:*topic*; '-' topic means using default") - flags.Bool(binaryFlag, false, "Deserialize object structure from given file.") - - flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call") -} - -func putObject(cmd *cobra.Command, _ []string) { - binary, _ := cmd.Flags().GetBool(binaryFlag) - cidVal, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - if !binary && cidVal == "" { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.CIDFlag)) - } - pk := key.GetOrGenerate(cmd) - - var ownerID user.ID - var cnr cid.ID - - filename, _ := cmd.Flags().GetString(fileFlag) - f, err := os.OpenFile(filename, os.O_RDONLY, os.ModePerm) - if err != nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err)) - } - var payloadReader io.Reader = f - obj := objectSDK.New() - - if binary { - payloadReader, cnr, ownerID = readFilePayload(filename, cmd) - } else { - readCID(cmd, &cnr) - user.IDFromKey(&ownerID, pk.PublicKey) - } - - attrs := getAllObjectAttributes(cmd) - - obj.SetContainerID(cnr) - obj.SetOwnerID(ownerID) - obj.SetAttributes(attrs...) 
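Note: parseNewObjectAttrs above and parseObjectAttrs further below share the same Key=Value grammar for user attributes. A minimal standalone sketch of just that parsing, with a plain struct standing in for objectSDK.Attribute (illustrative only, not the SDK type):

    package main

    import (
        "fmt"
        "strings"
    )

    // attribute stands in for objectSDK.Attribute in this sketch.
    type attribute struct{ Key, Value string }

    // parseAttrs splits each "Key=Value" pair at the first '=', which is
    // why values are free to contain '=' themselves.
    func parseAttrs(raw []string) ([]attribute, error) {
        attrs := make([]attribute, 0, len(raw))
        for _, kv := range raw {
            k, v, found := strings.Cut(kv, "=")
            if !found {
                return nil, fmt.Errorf("invalid attribute format: %s", kv)
            }
            attrs = append(attrs, attribute{Key: k, Value: v})
        }
        return attrs, nil
    }

    func main() {
        attrs, err := parseAttrs([]string{"FileName=report.pdf", "Checksum=sha256=abc"})
        if err != nil {
            panic(err)
        }
        fmt.Println(attrs) // [{FileName report.pdf} {Checksum sha256=abc}]
    }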
- - notificationInfo, err := parseObjectNotifications(cmd) - commonCmd.ExitOnErr(cmd, "can't parse object notification information: %w", err) - - if notificationInfo != nil { - obj.SetNotification(*notificationInfo) - } - - var prm internalclient.PutObjectPrm - if prepareLocally, _ := cmd.Flags().GetBool(prepareLocallyFlag); prepareLocally { - prm.SetClient(internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)) - prm.PrepareLocally() - } else { - ReadOrOpenSession(cmd, &prm, pk, cnr, nil) - } - Prepare(cmd, &prm) - prm.SetHeader(obj) - - var p *pb.ProgressBar - - noProgress, _ := cmd.Flags().GetBool(noProgressFlag) - if noProgress { - prm.SetPayloadReader(payloadReader) - } else { - if binary { - p = setBinaryPayloadReader(cmd, obj, &prm, payloadReader) - } else { - p = setFilePayloadReader(cmd, f, &prm) - } - } - - copyNum, err := cmd.Flags().GetString(copiesNumberFlag) - commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err) - prm.SetCopiesNumberByVectors(parseCopyNumber(cmd, copyNum)) - - res, err := internalclient.PutObject(cmd.Context(), prm) - if p != nil { - p.Finish() - } - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Printf("[%s] Object successfully stored\n", filename) - cmd.Printf(" OID: %s\n CID: %s\n", res.ID(), cnr) -} - -func parseCopyNumber(cmd *cobra.Command, copyNum string) []uint32 { - var cn []uint32 - if len(copyNum) > 0 { - for _, num := range strings.Split(copyNum, ",") { - val, err := strconv.ParseUint(num, 10, 32) - commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err) - cn = append(cn, uint32(val)) - } - } - return cn -} - -func readFilePayload(filename string, cmd *cobra.Command) (io.Reader, cid.ID, user.ID) { - buf, err := os.ReadFile(filename) - commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err) - objTemp := objectSDK.New() - // TODO(@acid-ant): #1932 Use streams to marshal/unmarshal payload - commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf)) - payloadReader := bytes.NewReader(objTemp.Payload()) - cnr, _ := objTemp.ContainerID() - ownerID := objTemp.OwnerID() - return payloadReader, cnr, ownerID -} - -func setFilePayloadReader(cmd *cobra.Command, f *os.File, prm *internalclient.PutObjectPrm) *pb.ProgressBar { - fi, err := f.Stat() - if err != nil { - cmd.PrintErrf("Failed to get file size, progress bar is disabled: %v\n", err) - prm.SetPayloadReader(f) - return nil - } - p := pb.New64(fi.Size()) - p.Output = cmd.OutOrStdout() - prm.SetPayloadReader(p.NewProxyReader(f)) - prm.SetHeaderCallback(func() { p.Start() }) - return p -} - -func setBinaryPayloadReader(cmd *cobra.Command, obj *objectSDK.Object, prm *internalclient.PutObjectPrm, payloadReader io.Reader) *pb.ProgressBar { - p := pb.New(len(obj.Payload())) - p.Output = cmd.OutOrStdout() - prm.SetPayloadReader(p.NewProxyReader(payloadReader)) - prm.SetHeaderCallback(func() { p.Start() }) - return p -} - -func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute { - attrs, err := parseObjectAttrs(cmd) - commonCmd.ExitOnErr(cmd, "can't parse object attributes: %w", err) - - expiresOn, _ := cmd.Flags().GetUint64(commonflags.ExpireAt) - if expiresOn > 0 { - var expAttrFound bool - expAttrValue := strconv.FormatUint(expiresOn, 10) - - for i := range attrs { - if attrs[i].Key() == objectV2.SysAttributeExpEpoch { - attrs[i].SetValue(expAttrValue) - expAttrFound = true - break - } - } - - if !expAttrFound { - index := len(attrs) - attrs = append(attrs, objectSDK.Attribute{}) - 
attrs[index].SetKey(objectV2.SysAttributeExpEpoch) - attrs[index].SetValue(expAttrValue) - } - } - return attrs -} - -func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - rawAttrs, err := cmd.Flags().GetStringSlice("attributes") - if err != nil { - return nil, err - } - - attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes - for i := range rawAttrs { - k, v, found := strings.Cut(rawAttrs[i], "=") - if !found { - return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i]) - } - attrs[i].SetKey(k) - attrs[i].SetValue(v) - } - - disableFilename, _ := cmd.Flags().GetBool("disable-filename") - if !disableFilename { - filename := filepath.Base(cmd.Flag(fileFlag).Value.String()) - index := len(attrs) - attrs = append(attrs, objectSDK.Attribute{}) - attrs[index].SetKey(objectSDK.AttributeFileName) - attrs[index].SetValue(filename) - } - - disableTime, _ := cmd.Flags().GetBool("disable-timestamp") - if !disableTime { - index := len(attrs) - attrs = append(attrs, objectSDK.Attribute{}) - attrs[index].SetKey(objectSDK.AttributeTimestamp) - attrs[index].SetValue(strconv.FormatInt(time.Now().Unix(), 10)) - } - - return attrs, nil -} - -func parseObjectNotifications(cmd *cobra.Command) (*objectSDK.NotificationInfo, error) { - const ( - separator = ":" - useDefaultTopic = "-" - ) - - raw := cmd.Flag(notificationFlag).Value.String() - if raw == "" { - return nil, nil - } - - before, after, found := strings.Cut(raw, separator) - if !found { - return nil, fmt.Errorf("notification must be in the form of: *epoch*%s*topic*, got %s", separator, raw) - } - - ni := new(objectSDK.NotificationInfo) - - epoch, err := strconv.ParseUint(before, 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse notification epoch %s: %w", before, err) - } - - ni.SetEpoch(epoch) - - if after == "" { - return nil, fmt.Errorf("incorrect empty topic: use %s to force using default topic", useDefaultTopic) - } - - if after != useDefaultTopic { - ni.SetTopic(after) - } - - return ni, nil -} diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go deleted file mode 100644 index 6ec508ae2..000000000 --- a/cmd/frostfs-cli/modules/object/range.go +++ /dev/null @@ -1,230 +0,0 @@ -package object - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var objectRangeCmd = &cobra.Command{ - Use: "range", - Short: "Get payload range data of an object", - Long: "Get payload range data of an object", - Run: getObjectRange, -} - -func initObjectRangeCmd() { - commonflags.Init(objectRangeCmd) - initFlagSession(objectRangeCmd, "RANGE") - - flags := objectRangeCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectRangeCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.StringSlice("range", 
nil, "Range to take data from in the form offset:length") - flags.String(fileFlag, "", "File to write object payload to. Default: stdout.") - flags.Bool(rawFlag, false, rawFlagDesc) -} - -func getObjectRange(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - objAddr := ReadObjectAddress(cmd, &cnr, &obj) - - ranges, err := getRangeList(cmd) - commonCmd.ExitOnErr(cmd, "", err) - - if len(ranges) != 1 { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("exactly one range must be specified, got: %d", len(ranges))) - } - - var out io.Writer - - filename := cmd.Flag(fileFlag).Value.String() - if filename == "" { - out = os.Stdout - } else { - f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, os.ModePerm) - if err != nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err)) - } - - defer f.Close() - - out = f - } - - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.PayloadRangePrm - prm.SetClient(cli) - Prepare(cmd, &prm) - readSession(cmd, &prm, pk, cnr, obj) - - raw, _ := cmd.Flags().GetBool(rawFlag) - prm.SetRawFlag(raw) - prm.SetAddress(objAddr) - prm.SetRange(&ranges[0]) - prm.SetPayloadWriter(out) - - _, err = internalclient.PayloadRange(cmd.Context(), prm) - if err != nil { - if ok := printSplitInfoErr(cmd, err); ok { - return - } - - commonCmd.ExitOnErr(cmd, "can't get object payload range: %w", err) - } - - if filename != "" { - cmd.Printf("[%s] Payload successfully saved\n", filename) - } -} - -func printSplitInfoErr(cmd *cobra.Command, err error) bool { - var errSplitInfo *objectSDK.SplitInfoError - - ok := errors.As(err, &errSplitInfo) - - if ok { - cmd.PrintErrln("Object is complex, split information received.") - printSplitInfo(cmd, errSplitInfo.SplitInfo()) - } - - return ok -} - -func printSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) { - bs, err := marshalSplitInfo(cmd, info) - commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err) - - cmd.Println(string(bs)) -} - -func marshalSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) ([]byte, error) { - toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - toProto, _ := cmd.Flags().GetBool("proto") - switch { - case toJSON && toProto: - return nil, errors.New("'--json' and '--proto' flags are mutually exclusive") - case toJSON: - return info.MarshalJSON() - case toProto: - return info.Marshal() - default: - b := bytes.NewBuffer(nil) - if splitID := info.SplitID(); splitID != nil { - b.WriteString("Split ID: " + splitID.String() + "\n") - } - if link, ok := info.Link(); ok { - b.WriteString("Linking object: " + link.String() + "\n") - } - if last, ok := info.LastPart(); ok { - b.WriteString("Last object: " + last.String() + "\n") - } - return b.Bytes(), nil - } -} - -func printECInfoErr(cmd *cobra.Command, err error) bool { - var errECInfo *objectSDK.ECInfoError - - ok := errors.As(err, &errECInfo) - - if ok { - toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - toProto, _ := cmd.Flags().GetBool("proto") - if !toJSON && !toProto { - cmd.PrintErrln("Object is erasure-encoded, ec information received.") - } - printECInfo(cmd, errECInfo.ECInfo()) - } - - return ok -} - -func printECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) { - bs, err := marshalECInfo(cmd, info) - commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err) - - cmd.Println(string(bs)) -} - -func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) { - toJSON, _ := 
cmd.Flags().GetBool(commonflags.JSON) - toProto, _ := cmd.Flags().GetBool("proto") - switch { - case toJSON && toProto: - return nil, errors.New("'--json' and '--proto' flags are mutually exclusive") - case toJSON: - return info.MarshalJSON() - case toProto: - return info.Marshal() - default: - b := bytes.NewBuffer(nil) - b.WriteString("Total chunks: " + strconv.Itoa(int(info.Chunks[0].Total))) - for _, chunk := range info.Chunks { - var id oid.ID - if err := id.Decode(chunk.ID.GetValue()); err != nil { - return nil, fmt.Errorf("unable to decode chunk id: %w", err) - } - b.WriteString("\n Index: " + strconv.Itoa(int(chunk.Index)) + " ID: " + id.String()) - } - return b.Bytes(), nil - } -} - -func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) { - vs, err := cmd.Flags().GetStringSlice("range") - if len(vs) == 0 || err != nil { - return nil, err - } - rs := make([]objectSDK.Range, len(vs)) - for i := range vs { - before, after, found := strings.Cut(vs[i], rangeSep) - if !found { - return nil, fmt.Errorf("invalid range specifier: %s", vs[i]) - } - - offset, err := strconv.ParseUint(before, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", vs[i], err) - } - length, err := strconv.ParseUint(after, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid '%s' range length specifier: %w", vs[i], err) - } - - if length == 0 { - return nil, fmt.Errorf("invalid '%s' range: zero length", vs[i]) - } - - if offset+length <= offset { - return nil, fmt.Errorf("invalid '%s' range: uint64 overflow", vs[i]) - } - - rs[i].SetOffset(offset) - rs[i].SetLength(length) - } - return rs, nil -} diff --git a/cmd/frostfs-cli/modules/object/root.go b/cmd/frostfs-cli/modules/object/root.go deleted file mode 100644 index b808a509e..000000000 --- a/cmd/frostfs-cli/modules/object/root.go +++ /dev/null @@ -1,52 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" -) - -// Cmd represents the object command. -var Cmd = &cobra.Command{ - Use: "object", - Short: "Operations with Objects", - Long: `Operations with Objects`, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - // bind exactly that cmd's flags to - // the viper before execution - commonflags.Bind(cmd) - commonflags.BindAPI(cmd) - }, -} - -func init() { - objectChildCommands := []*cobra.Command{ - objectPutCmd, - objectDelCmd, - objectGetCmd, - objectSearchCmd, - objectHeadCmd, - objectHashCmd, - objectRangeCmd, - objectLockCmd, - objectNodesCmd, - objectPatchCmd, - } - - Cmd.AddCommand(objectChildCommands...) 
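Note: the offset+length <= offset guard in getRangeList above is an unsigned-overflow check: uint64 addition wraps modulo 2^64, so for non-zero length a wrapped sum always lands at or below the original offset. A small worked sketch of the same validation:

    package main

    import (
        "fmt"
        "math"
    )

    // validRange mirrors the two checks in getRangeList: non-zero length
    // and no uint64 wrap-around in offset+length.
    func validRange(offset, length uint64) bool {
        return length > 0 && offset+length > offset
    }

    func main() {
        fmt.Println(validRange(4096, 1024))            // true
        fmt.Println(validRange(4096, 0))               // false: zero length
        fmt.Println(validRange(math.MaxUint64-10, 20)) // false: sum wraps to 9
    }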
- - for _, objCommand := range objectChildCommands { - InitBearer(objCommand) - commonflags.InitAPI(objCommand) - } - - initObjectPutCmd() - initObjectPatchCmd() - initObjectDeleteCmd() - initObjectGetCmd() - initObjectSearchCmd() - initObjectHeadCmd() - initObjectHashCmd() - initObjectRangeCmd() - initCommandObjectLock() - initObjectNodesCmd() -} diff --git a/cmd/frostfs-cli/modules/object/search.go b/cmd/frostfs-cli/modules/object/search.go deleted file mode 100644 index ca5d78bc9..000000000 --- a/cmd/frostfs-cli/modules/object/search.go +++ /dev/null @@ -1,145 +0,0 @@ -package object - -import ( - "fmt" - "os" - "strings" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var ( - searchFilters []string - - objectSearchCmd = &cobra.Command{ - Use: "search", - Short: "Search object", - Long: "Search object", - Run: searchObject, - } -) - -func initObjectSearchCmd() { - commonflags.Init(objectSearchCmd) - initFlagSession(objectSearchCmd, "SEARCH") - - flags := objectSearchCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = objectSearchCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.StringSliceVarP(&searchFilters, "filters", "f", nil, - "Repeated filter expressions or files with protobuf JSON") - - flags.Bool("root", false, "Search for user objects") - flags.Bool("phy", false, "Search physically stored objects") - flags.String(commonflags.OIDFlag, "", "Search object by identifier") -} - -func searchObject(cmd *cobra.Command, _ []string) { - var cnr cid.ID - readCID(cmd, &cnr) - - sf, err := parseSearchFilters(cmd) - commonCmd.ExitOnErr(cmd, "", err) - - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.SearchObjectsPrm - prm.SetClient(cli) - Prepare(cmd, &prm) - readSessionGlobal(cmd, &prm, pk, cnr) - prm.SetContainerID(cnr) - prm.SetFilters(sf) - - res, err := internalclient.SearchObjects(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - ids := res.IDList() - - cmd.Printf("Found %d objects.\n", len(ids)) - for i := range ids { - cmd.Println(ids[i].String()) - } -} - -var searchUnaryOpVocabulary = map[string]objectSDK.SearchMatchType{ - "NOPRESENT": objectSDK.MatchNotPresent, -} - -var searchBinaryOpVocabulary = map[string]objectSDK.SearchMatchType{ - "EQ": objectSDK.MatchStringEqual, - "NE": objectSDK.MatchStringNotEqual, - "COMMON_PREFIX": objectSDK.MatchCommonPrefix, -} - -func parseSearchFilters(cmd *cobra.Command) (objectSDK.SearchFilters, error) { - var fs objectSDK.SearchFilters - - for i := range searchFilters { - words := strings.Fields(searchFilters[i]) - - switch len(words) { - default: - return nil, fmt.Errorf("invalid field number: %d", len(words)) - case 1: - data, err := os.ReadFile(words[0]) - if err != nil { - return nil, fmt.Errorf("could not read attributes filter from file: %w", err) - } - - subFs := objectSDK.NewSearchFilters() - - if err := subFs.UnmarshalJSON(data); err != nil { - return nil, fmt.Errorf("could not unmarshal 
attributes filter from file: %w", err) - } - - fs = append(fs, subFs...) - case 2: - m, ok := searchUnaryOpVocabulary[words[1]] - if !ok { - return nil, fmt.Errorf("unsupported unary op: %s", words[1]) - } - - fs.AddFilter(words[0], "", m) - case 3: - m, ok := searchBinaryOpVocabulary[words[1]] - if !ok { - return nil, fmt.Errorf("unsupported binary op: %s", words[1]) - } - - fs.AddFilter(words[0], words[2], m) - } - } - - root, _ := cmd.Flags().GetBool("root") - if root { - fs.AddRootFilter() - } - - phy, _ := cmd.Flags().GetBool("phy") - if phy { - fs.AddPhyFilter() - } - - oid, _ := cmd.Flags().GetString(commonflags.OIDFlag) - if oid != "" { - var id oidSDK.ID - if err := id.DecodeString(oid); err != nil { - return nil, fmt.Errorf("could not parse object ID: %w", err) - } - - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - } - - return fs, nil -} diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go deleted file mode 100644 index 8e4e8b287..000000000 --- a/cmd/frostfs-cli/modules/object/util.go +++ /dev/null @@ -1,509 +0,0 @@ -package object - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "os" - "strings" - - internal "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - bearerTokenFlag = "bearer" - - rawFlag = "raw" - rawFlagDesc = "Set raw request option" - fileFlag = "file" - binaryFlag = "binary" -) - -type RPCParameters interface { - SetBearerToken(prm *bearer.Token) - SetTTL(uint32) - SetXHeaders([]string) -} - -// InitBearer adds bearer token flag to a command. -func InitBearer(cmd *cobra.Command) { - flags := cmd.Flags() - flags.String(bearerTokenFlag, "", "File with signed JSON or binary encoded bearer token") -} - -// Prepare prepares object-related parameters for a command. 
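Note: the filter grammar accepted by parseSearchFilters above is driven purely by field count: one field names a file with a protobuf-JSON filter, two fields form a unary expression (NOPRESENT), and three fields form a binary one (EQ, NE, COMMON_PREFIX). An illustrative classifier over the same grammar, which only labels expressions instead of building SDK filters:

    package main

    import (
        "fmt"
        "strings"
    )

    // classify mirrors the field-count dispatch of parseSearchFilters.
    func classify(expr string) string {
        words := strings.Fields(expr)
        switch len(words) {
        case 1:
            return "JSON filter file: " + words[0]
        case 2:
            return fmt.Sprintf("unary %s on key %q", words[1], words[0])
        case 3:
            return fmt.Sprintf("binary %s: %q vs %q", words[1], words[0], words[2])
        default:
            return fmt.Sprintf("invalid field number: %d", len(words))
        }
    }

    func main() {
        fmt.Println(classify("filters.json"))           // JSON filter file: filters.json
        fmt.Println(classify("Index NOPRESENT"))        // unary NOPRESENT on key "Index"
        fmt.Println(classify("FileName EQ report.pdf")) // binary EQ: "FileName" vs "report.pdf"
    }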
-func Prepare(cmd *cobra.Command, prms ...RPCParameters) { - ttl := viper.GetUint32(commonflags.TTL) - common.PrintVerbose(cmd, "TTL: %d", ttl) - - for i := range prms { - btok := common.ReadBearerToken(cmd, bearerTokenFlag) - - prms[i].SetBearerToken(btok) - prms[i].SetTTL(ttl) - prms[i].SetXHeaders(parseXHeaders(cmd)) - } -} - -func parseXHeaders(cmd *cobra.Command) []string { - xHeaders, _ := cmd.Flags().GetStringSlice(commonflags.XHeadersKey) - xs := make([]string, 0, 2*len(xHeaders)) - - for i := range xHeaders { - k, v, found := strings.Cut(xHeaders[i], "=") - if !found { - panic(fmt.Errorf("invalid X-Header format: %s", xHeaders[i])) - } - - xs = append(xs, k, v) - } - - return xs -} - -func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { - readCID(cmd, cnr) - readOID(cmd, obj) - - var addr oid.Address - addr.SetContainer(*cnr) - addr.SetObject(*obj) - return addr -} - -func readObjectAddressBin(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID, filename string) oid.Address { - buf, err := os.ReadFile(filename) - commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err) - objTemp := objectSDK.New() - commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf)) - - var addr oid.Address - *cnr, _ = objTemp.ContainerID() - *obj, _ = objTemp.ID() - addr.SetContainer(*cnr) - addr.SetObject(*obj) - return addr -} - -func readCID(cmd *cobra.Command, id *cid.ID) { - err := id.DecodeString(cmd.Flag(commonflags.CIDFlag).Value.String()) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) -} - -func readOID(cmd *cobra.Command, id *oid.ID) { - err := id.DecodeString(cmd.Flag(commonflags.OIDFlag).Value.String()) - commonCmd.ExitOnErr(cmd, "decode object ID string: %w", err) -} - -// SessionPrm is a common interface of object operation's input which supports -// sessions. -type SessionPrm interface { - SetSessionToken(*session.Object) - SetClient(*client.Client) -} - -// forwards all parameters to _readVerifiedSession and object as nil. -func readSessionGlobal(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID) { - _readVerifiedSession(cmd, dst, key, cnr, nil) -} - -// forwards all parameters to _readVerifiedSession. -func readSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj oid.ID) { - _readVerifiedSession(cmd, dst, key, cnr, &obj) -} - -// decodes session.Object from the file by path specified in the -// commonflags.SessionToken flag. Returns nil if flag is not set. -func getSession(cmd *cobra.Command) *session.Object { - common.PrintVerbose(cmd, "Trying to read session from the file...") - - path, _ := cmd.Flags().GetString(commonflags.SessionToken) - if path == "" { - common.PrintVerbose(cmd, "File with session token is not provided.") - return nil - } - - common.PrintVerbose(cmd, "Reading session from the file [%s]...", path) - - var tok session.Object - - err := common.ReadBinaryOrJSON(cmd, &tok, path) - commonCmd.ExitOnErr(cmd, "read session: %v", err) - - return &tok -} - -// decodes object session from JSON file from commonflags.SessionToken command -// flag if it is provided, and writes resulting session into the provided SessionPrm. -// Returns flag presence. 
Checks: -// -// - if session verb corresponds to given SessionPrm according to its type -// - relation to the given container -// - relation to the given object if non-nil -// - relation to the given private key used within the command -// - session signature -// -// SessionPrm MUST be one of: -// -// *internal.GetObjectPrm -// *internal.HeadObjectPrm -// *internal.SearchObjectsPrm -// *internal.PayloadRangePrm -// *internal.HashPayloadRangesPrm -func _readVerifiedSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) { - var cmdVerb session.ObjectVerb - - switch dst.(type) { - default: - panic(fmt.Sprintf("unsupported op parameters %T", dst)) - case *internal.GetObjectPrm: - cmdVerb = session.VerbObjectGet - case *internal.HeadObjectPrm: - cmdVerb = session.VerbObjectHead - case *internal.SearchObjectsPrm: - cmdVerb = session.VerbObjectSearch - case *internal.PayloadRangePrm: - cmdVerb = session.VerbObjectRange - case *internal.HashPayloadRangesPrm: - cmdVerb = session.VerbObjectRangeHash - } - - tok := getSession(cmd) - if tok == nil { - return - } - - common.PrintVerbose(cmd, "Checking session correctness...") - - switch false { - case tok.AssertContainer(cnr): - commonCmd.ExitOnErr(cmd, "", errors.New("unrelated container in the session")) - case obj == nil || tok.AssertObject(*obj): - commonCmd.ExitOnErr(cmd, "", errors.New("unrelated object in the session")) - case tok.AssertVerb(cmdVerb): - commonCmd.ExitOnErr(cmd, "", errors.New("wrong verb of the session")) - case tok.AssertAuthKey((*frostfsecdsa.PublicKey)(&key.PublicKey)): - commonCmd.ExitOnErr(cmd, "", errors.New("unrelated key in the session")) - case tok.VerifySignature(): - commonCmd.ExitOnErr(cmd, "", errors.New("invalid signature of the session data")) - } - - common.PrintVerbose(cmd, "Session is correct.") - - dst.SetSessionToken(tok) -} - -// ReadOrOpenSession opens client connection and calls ReadOrOpenSessionViaClient with it. -func ReadOrOpenSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) { - cli := internal.GetSDKClientByFlag(cmd, key, commonflags.RPC) - ReadOrOpenSessionViaClient(cmd, dst, cli, key, cnr, obj) -} - -// ReadOrOpenSessionViaClient tries to read session from the file specified in -// commonflags.SessionToken flag, finalizes structures of the decoded token -// and write the result into provided SessionPrm. If file is missing, -// ReadOrOpenSessionViaClient calls OpenSessionViaClient. -func ReadOrOpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) { - tok := getSession(cmd) - if tok == nil { - OpenSessionViaClient(cmd, dst, cli, key, cnr, obj) - return - } - - var objs []oid.ID - if obj != nil { - objs = []oid.ID{*obj} - - if _, ok := dst.(*internal.DeleteObjectPrm); ok { - common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - - objs = append(objs, collectObjectRelatives(cmd, cli, cnr, *obj)...) - } - } - - finalizeSession(cmd, dst, tok, key, cnr, objs...) - dst.SetClient(cli) -} - -// OpenSession opens client connection and calls OpenSessionViaClient with it. 
-func OpenSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) { - cli := internal.GetSDKClientByFlag(cmd, key, commonflags.RPC) - OpenSessionViaClient(cmd, dst, cli, key, cnr, obj) -} - -// OpenSessionViaClient opens object session with the remote node, finalizes -// structure of the session token and writes the result into the provided -// SessionPrm. Also writes provided client connection to the SessionPrm. -// -// SessionPrm MUST be one of: -// -// *internal.PutObjectPrm -// *internal.DeleteObjectPrm -// -// If provided SessionPrm is of type internal.DeleteObjectPrm, OpenSessionViaClient -// spreads the session to all object's relatives. -func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) { - var objs []oid.ID - - if obj != nil { - if _, ok := dst.(*internal.DeleteObjectPrm); ok { - common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - - objs = collectObjectRelatives(cmd, cli, cnr, *obj) - objs = append(objs, *obj) - } - } - - var tok session.Object - - const sessionLifetime = 10 // in FrostFS epochs - - common.PrintVerbose(cmd, "Opening remote session with the node...") - - err := sessionCli.CreateSession(cmd.Context(), &tok, cli, sessionLifetime) - commonCmd.ExitOnErr(cmd, "open remote session: %w", err) - - common.PrintVerbose(cmd, "Session successfully opened.") - - finalizeSession(cmd, dst, &tok, key, cnr, objs...) - - dst.SetClient(cli) -} - -// specifies session verb, binds the session to the given container and limits -// the session by the given objects (if specified). After all data is written, -// signs session using provided private key and writes the session into the -// given SessionPrm. -// -// SessionPrm MUST be one of: -// -// *internal.PutObjectPrm -// *internal.DeleteObjectPrm -func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, key *ecdsa.PrivateKey, cnr cid.ID, objs ...oid.ID) { - common.PrintVerbose(cmd, "Finalizing session token...") - - switch dst.(type) { - default: - panic(fmt.Sprintf("unsupported op parameters %T", dst)) - case *internal.PutObjectPrm: - common.PrintVerbose(cmd, "Binding session to object PUT...") - tok.ForVerb(session.VerbObjectPut) - case *internal.PatchObjectPrm: - tok.ForVerb(session.VerbObjectPatch) - case *internal.DeleteObjectPrm: - common.PrintVerbose(cmd, "Binding session to object DELETE...") - tok.ForVerb(session.VerbObjectDelete) - } - - common.PrintVerbose(cmd, "Binding session to container %s...", cnr) - - tok.BindContainer(cnr) - if len(objs) > 0 { - common.PrintVerbose(cmd, "Limiting session by the objects %v...", objs) - tok.LimitByObjects(objs...) - } - - common.PrintVerbose(cmd, "Signing session...") - - err := tok.Sign(*key) - commonCmd.ExitOnErr(cmd, "sign session: %w", err) - - common.PrintVerbose(cmd, "Session token successfully formed and attached to the request.") - - dst.SetSessionToken(tok) -} - -// calls commonflags.InitSession with "object " name. -func initFlagSession(cmd *cobra.Command, verb string) { - commonflags.InitSession(cmd, "object "+verb) -} - -// collects and returns all relatives of the given object stored in the specified -// container. Empty result without an error means lack of relationship in the -// container. -// -// The object itself is not included in the result. 
-func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID { - common.PrintVerbose(cmd, "Fetching raw object header...") - - // request raw header first - var addrObj oid.Address - addrObj.SetContainer(cnr) - addrObj.SetObject(obj) - - var prmHead internal.HeadObjectPrm - prmHead.SetClient(cli) - prmHead.SetAddress(addrObj) - prmHead.SetRawFlag(true) - - Prepare(cmd, &prmHead) - - o, err := internal.HeadObject(cmd.Context(), prmHead) - - var errSplit *objectSDK.SplitInfoError - var errEC *objectSDK.ECInfoError - - switch { - default: - commonCmd.ExitOnErr(cmd, "failed to get raw object header: %w", err) - case err == nil: - common.PrintVerbose(cmd, "Raw header received - object is singular.") - if ech := o.Header().ECHeader(); ech != nil { - commonCmd.ExitOnErr(cmd, "Lock EC chunk failed: %w", errors.ErrUnsupported) - } - return nil - case errors.As(err, &errSplit): - common.PrintVerbose(cmd, "Split information received - object is virtual.") - splitInfo := errSplit.SplitInfo() - - if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok { - return members - } - - if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnr); ok { - return members - } - - return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj) - case errors.As(err, &errEC): - common.PrintVerbose(cmd, "Object is erasure-coded.") - return nil - } - return nil -} - -func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) { - // collect split chain by the descending ease of operations (ease is evaluated heuristically). - // If any approach fails, we don't try the next since we assume that it will fail too. 
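Note: collectObjectRelatives above distinguishes object kinds solely by the typed error returned from a raw HEAD: nil means a singular object, SplitInfoError a virtual (split) one, ECInfoError an erasure-coded one. A reduced sketch of that errors.As dispatch, with a hypothetical error type standing in for the SDK's SplitInfoError:

    package main

    import (
        "errors"
        "fmt"
    )

    // splitInfoError stands in for objectSDK.SplitInfoError.
    type splitInfoError struct{ lastPart string }

    func (e *splitInfoError) Error() string { return "object is virtual" }

    // classify mirrors the raw-HEAD dispatch: success, a recognized typed
    // error, or anything else treated as fatal.
    func classify(err error) string {
        var errSplit *splitInfoError
        switch {
        case err == nil:
            return "singular object"
        case errors.As(err, &errSplit):
            return "virtual object, last part " + errSplit.lastPart
        default:
            return "fatal: " + err.Error()
        }
    }

    func main() {
        fmt.Println(classify(nil))
        fmt.Println(classify(fmt.Errorf("head: %w", &splitInfoError{lastPart: "<last-part-oid>"})))
    }

errors.As unwraps through the %w chain, which is what lets the CLI recognize split information even when the error arrives wrapped.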
- - if idLinking, ok := splitInfo.Link(); ok { - common.PrintVerbose(cmd, "Collecting split members using linking object %s...", idLinking) - - var addrObj oid.Address - addrObj.SetContainer(cnr) - addrObj.SetObject(idLinking) - - prmHead.SetAddress(addrObj) - prmHead.SetRawFlag(false) - // client is already set - - res, err := internal.HeadObject(cmd.Context(), prmHead) - if err == nil { - children := res.Header().Children() - - common.PrintVerbose(cmd, "Received split members from the linking object: %v", children) - - return append(children, idLinking), true - } - - // linking object is not required for - // object collecting - common.PrintVerbose(cmd, "failed to get linking object's header: %w", err) - } - return nil, false -} - -func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) { - if idSplit := splitInfo.SplitID(); idSplit != nil { - common.PrintVerbose(cmd, "Collecting split members by split ID...") - - var query objectSDK.SearchFilters - query.AddSplitIDFilter(objectSDK.MatchStringEqual, idSplit) - - var prm internal.SearchObjectsPrm - prm.SetContainerID(cnr) - prm.SetClient(cli) - prm.SetFilters(query) - - res, err := internal.SearchObjects(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "failed to search objects by split ID: %w", err) - - parts := res.IDList() - - common.PrintVerbose(cmd, "Found objects by split ID: %v", res.IDList()) - - return parts, true - } - return nil, false -} - -func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID { - var addrObj oid.Address - addrObj.SetContainer(cnr) - - idMember, ok := splitInfo.LastPart() - if !ok { - commonCmd.ExitOnErr(cmd, "", errors.New("missing any data in received object split information")) - } - - common.PrintVerbose(cmd, "Traverse the object split chain in reverse...", idMember) - - var res *internal.HeadObjectRes - var err error - - chain := []oid.ID{idMember} - chainSet := map[oid.ID]struct{}{idMember: {}} - - prmHead.SetRawFlag(false) - // split members are almost definitely singular, but don't get hung up on it - - for { - common.PrintVerbose(cmd, "Reading previous element of the split chain member %s...", idMember) - - addrObj.SetObject(idMember) - prmHead.SetAddress(addrObj) - - res, err = internal.HeadObject(cmd.Context(), prmHead) - commonCmd.ExitOnErr(cmd, "failed to read split chain member's header: %w", err) - - idMember, ok = res.Header().PreviousID() - if !ok { - common.PrintVerbose(cmd, "Chain ended.") - break - } - - if _, ok = chainSet[idMember]; ok { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("duplicated member in the split chain %s", idMember)) - } - - chain = append(chain, idMember) - chainSet[idMember] = struct{}{} - } - - common.PrintVerbose(cmd, "Looking for a linking object...") - - var query objectSDK.SearchFilters - query.AddParentIDFilter(objectSDK.MatchStringEqual, obj) - - var prmSearch internal.SearchObjectsPrm - prmSearch.SetClient(cli) - prmSearch.SetContainerID(cnr) - prmSearch.SetFilters(query) - - resSearch, err := internal.SearchObjects(cmd.Context(), prmSearch) - commonCmd.ExitOnErr(cmd, "failed to find object children: %w", err) - - list := resSearch.IDList() - - for i := range list { - if _, ok = chainSet[list[i]]; !ok { - common.PrintVerbose(cmd, "Found one more related object %s.", list[i]) - chain = append(chain, list[i]) - } - } - - return chain -} diff --git a/cmd/frostfs-cli/modules/root.go 
b/cmd/frostfs-cli/modules/root.go deleted file mode 100644 index 88acab341..000000000 --- a/cmd/frostfs-cli/modules/root.go +++ /dev/null @@ -1,139 +0,0 @@ -package cmd - -import ( - "os" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - accountingCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/accounting" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl" - apemanager "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/ape_manager" - bearerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/bearer" - containerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/container" - controlCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/control" - netmapCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/netmap" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/tree" - utilCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - envPrefix = "FROSTFS_CLI" -) - -// Global scope flags. -var ( - cfgFile string - cfgDir string -) - -// rootCmd represents the base command when called without any subcommands. -var rootCmd = &cobra.Command{ - Use: "frostfs-cli", - Short: "Command Line Tool to work with FrostFS", - Long: `FrostFS CLI provides all basic interactions with FrostFS and it's services. - -It contains commands for interaction with FrostFS nodes using different versions -of frostfs-api and some useful utilities for compiling ACL rules from JSON -notation, managing container access through protocol gates, querying network map -and much more!`, - Run: entryPoint, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - common.StartClientCommandSpan(cmd) - }, - PersistentPostRun: common.StopClientCommandSpan, -} - -// Execute adds all child commands to the root command and sets flags appropriately. -// This is called by main.main(). It only needs to happen once to the rootCmd. -func Execute() { - err := rootCmd.Execute() - commonCmd.ExitOnErr(rootCmd, "", err) -} - -func init() { - cobra.OnInitialize(initConfig) - cobra.EnableTraverseRunHooks = true - - // use stdout as default output for cmd.Print() - rootCmd.SetOut(os.Stdout) - - // Here you will define your flags and configuration settings. - // Cobra supports persistent flags, which, if defined here, - // will be global for your application. 
- rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "Config file (default is $HOME/.config/frostfs-cli/config.yaml)") - rootCmd.PersistentFlags().StringVar(&cfgDir, "config-dir", "", "Config directory") - rootCmd.PersistentFlags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, - false, commonflags.VerboseUsage) - - _ = viper.BindPFlag(commonflags.Verbose, rootCmd.PersistentFlags().Lookup(commonflags.Verbose)) - - // Cobra also supports local flags, which will only run - // when this action is called directly. - rootCmd.Flags().Bool("version", false, "Application version and FrostFS API compatibility") - - rootCmd.AddCommand(acl.Cmd) - rootCmd.AddCommand(apemanager.Cmd) - rootCmd.AddCommand(bearerCli.Cmd) - rootCmd.AddCommand(sessionCli.Cmd) - rootCmd.AddCommand(accountingCli.Cmd) - rootCmd.AddCommand(controlCli.Cmd) - rootCmd.AddCommand(utilCli.Cmd) - rootCmd.AddCommand(netmapCli.Cmd) - rootCmd.AddCommand(objectCli.Cmd) - rootCmd.AddCommand(containerCli.Cmd) - rootCmd.AddCommand(tree.Cmd) - rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) -} - -func entryPoint(cmd *cobra.Command, _ []string) { - printVersion, _ := cmd.Flags().GetBool("version") - if printVersion { - cmd.Print(misc.BuildInfo("FrostFS CLI")) - - return - } - - _ = cmd.Usage() -} - -// initConfig reads in config file and ENV variables if set. -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find config directory. - configDir, err := os.UserConfigDir() - if err != nil { - common.PrintVerbose(rootCmd, "Get config dir: %s", err) - } else { - // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml" - viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli")) - viper.SetConfigName("config") - viper.SetConfigType("yaml") - } - } - - viper.SetEnvPrefix(envPrefix) - viper.AutomaticEnv() // read in environment variables that match - - // If a config file is found, read it in. 
- if err := viper.ReadInConfig(); err == nil { - common.PrintVerbose(rootCmd, "Using config file: %s", viper.ConfigFileUsed()) - } - - if cfgDir != "" { - if err := config.ReadConfigDir(viper.GetViper(), cfgDir); err != nil { - commonCmd.ExitOnErr(rootCmd, "failed to read config dir: %w", err) - } - } -} diff --git a/cmd/frostfs-cli/modules/session/create.go b/cmd/frostfs-cli/modules/session/create.go deleted file mode 100644 index e13200a5d..000000000 --- a/cmd/frostfs-cli/modules/session/create.go +++ /dev/null @@ -1,135 +0,0 @@ -package session - -import ( - "context" - "fmt" - "os" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "github.com/google/uuid" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - outFlag = "out" - jsonFlag = commonflags.JSON -) - -const defaultLifetime = 10 - -var createCmd = &cobra.Command{ - Use: "create", - Short: "Create session token", - Run: createSession, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.Account, cmd.Flags().Lookup(commonflags.Account)) - }, -} - -func init() { - createCmd.Flags().Uint64P(commonflags.Lifetime, "l", defaultLifetime, "Number of epochs for token to stay valid") - createCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) - createCmd.Flags().StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - createCmd.Flags().String(outFlag, "", "File to write session token to") - createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON") - createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage) - - _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.WalletPath) - _ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag) - _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.RPC) -} - -func createSession(cmd *cobra.Command, _ []string) { - privKey := key.Get(cmd) - - var netAddr network.Address - addrStr, _ := cmd.Flags().GetString(commonflags.RPC) - commonCmd.ExitOnErr(cmd, "can't parse endpoint: %w", netAddr.FromString(addrStr)) - - c, err := internalclient.GetSDKClient(cmd.Context(), cmd, privKey, netAddr) - commonCmd.ExitOnErr(cmd, "can't create client: %w", err) - - lifetime := uint64(defaultLifetime) - if lfArg, _ := cmd.Flags().GetUint64(commonflags.Lifetime); lfArg != 0 { - lifetime = lfArg - } - - var tok session.Object - - err = CreateSession(cmd.Context(), &tok, c, lifetime) - commonCmd.ExitOnErr(cmd, "can't create session: %w", err) - - var data []byte - - if toJSON, _ := cmd.Flags().GetBool(jsonFlag); toJSON { - data, err = tok.MarshalJSON() - commonCmd.ExitOnErr(cmd, "can't decode session token JSON: %w", err) - } else { - data = tok.Marshal() - } - - filename, _ := cmd.Flags().GetString(outFlag) - err = os.WriteFile(filename, data, 0o644) 
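Note: CreateSession below anchors token validity to network epochs rather than wall-clock time: with a current epoch of, say, 100 and the default lifetime of 10, the token is filled with nbf = iat = 100 and exp = 110 (numbers made up for illustration). A micro-sketch of the arithmetic:

    package main

    import "fmt"

    // tok stands in for session.Object; only the epoch fields matter here.
    type tok struct{ nbf, iat, exp uint64 }

    func fill(cur, lifetime uint64) tok {
        return tok{nbf: cur, iat: cur, exp: cur + lifetime}
    }

    func main() {
        fmt.Printf("%+v\n", fill(100, 10)) // {nbf:100 iat:100 exp:110}
    }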
- commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err) -} - -// CreateSession opens a new communication with FrostFS storage node using client connection. -// The session is expected to be maintained by the storage node during the given -// number of epochs. -// -// Fills ID, lifetime and session key. -func CreateSession(ctx context.Context, dst *session.Object, c *client.Client, lifetime uint64) error { - netInfoPrm := internalclient.NetworkInfoPrm{ - Client: c, - } - - ni, err := internalclient.NetworkInfo(ctx, netInfoPrm) - if err != nil { - return fmt.Errorf("can't fetch network info: %w", err) - } - - cur := ni.NetworkInfo().CurrentEpoch() - exp := cur + lifetime - - var sessionPrm internalclient.CreateSessionPrm - sessionPrm.SetClient(c) - sessionPrm.Expiration = exp - - sessionRes, err := internalclient.CreateSession(ctx, sessionPrm) - if err != nil { - return fmt.Errorf("can't open session: %w", err) - } - - binIDSession := sessionRes.ID() - - var keySession frostfsecdsa.PublicKey - - err = keySession.Decode(sessionRes.SessionKey()) - if err != nil { - return fmt.Errorf("decode public session key: %w", err) - } - - var idSession uuid.UUID - - err = idSession.UnmarshalBinary(binIDSession) - if err != nil { - return fmt.Errorf("decode session ID: %w", err) - } - - dst.SetID(idSession) - dst.SetNbf(cur) - dst.SetIat(cur) - dst.SetExp(exp) - dst.SetAuthKey(&keySession) - - return nil -} diff --git a/cmd/frostfs-cli/modules/session/root.go b/cmd/frostfs-cli/modules/session/root.go deleted file mode 100644 index 3554a0ee1..000000000 --- a/cmd/frostfs-cli/modules/session/root.go +++ /dev/null @@ -1,14 +0,0 @@ -package session - -import ( - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "session", - Short: "Operations with session token", -} - -func init() { - Cmd.AddCommand(createCmd) -} diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go deleted file mode 100644 index e2c05d486..000000000 --- a/cmd/frostfs-cli/modules/tree/add.go +++ /dev/null @@ -1,100 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var addCmd = &cobra.Command{ - Use: "add", - Short: "Add a node to the tree service", - Run: add, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initAddCmd() { - commonflags.Init(addCmd) - initCTID(addCmd) - - ff := addCmd.Flags() - ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") - ff.Uint64(parentIDFlagKey, 0, "Parent node ID") -} - -func add(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - var cnr cid.ID - err := cnr.DecodeString(cmd.Flag(commonflags.CIDFlag).Value.String()) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - pid, _ := cmd.Flags().GetUint64(parentIDFlagKey) - - meta, err := parseMeta(cmd) - commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err) - - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - 
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - var bt []byte - if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { - bt = t.Marshal() - } - - req := new(tree.AddRequest) - req.Body = &tree.AddRequest_Body{ - ContainerId: rawCID, - TreeId: tid, - ParentId: pid, - Meta: meta, - BearerToken: bt, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.Add(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call add: %w", err) - - cmd.Println("Node ID: ", resp.GetBody().GetNodeId()) -} - -func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) { - raws, _ := cmd.Flags().GetStringSlice(metaFlagKey) - if len(raws) == 0 { - return nil, nil - } - - pairs := make([]tree.KeyValue, 0, len(raws)) - for i := range raws { - k, v, found := strings.Cut(raws[i], "=") - if !found { - return nil, fmt.Errorf("invalid meta pair format: %s", raws[i]) - } - - var pair tree.KeyValue - pair.Key = k - pair.Value = []byte(v) - - pairs = append(pairs, pair) - } - - return pairs, nil -} diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go deleted file mode 100644 index 7263bcd0d..000000000 --- a/cmd/frostfs-cli/modules/tree/add_by_path.go +++ /dev/null @@ -1,99 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/spf13/cobra" -) - -var addByPathCmd = &cobra.Command{ - Use: "add-by-path", - Short: "Add a node by the path", - Run: addByPath, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initAddByPathCmd() { - commonflags.Init(addByPathCmd) - initCTID(addByPathCmd) - - ff := addByPathCmd.Flags() - - // tree service does not allow any attribute except - // the 'FileName' but that's a limitation of the - // current implementation, not the rule - // ff.String(pathAttributeFlagKey, "", "Path attribute") - ff.String(pathFlagKey, "", "Path to a node") - ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") - - _ = cobra.MarkFlagRequired(ff, pathFlagKey) -} - -func addByPath(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidRaw) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - meta, err := parseMeta(cmd) - commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err) - - path, _ := cmd.Flags().GetString(pathFlagKey) - - var bt []byte - if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { - bt = t.Marshal() - } - - req := new(tree.AddByPathRequest) - req.Body = &tree.AddByPathRequest_Body{ - ContainerId: rawCID, - TreeId: tid, 
- PathAttribute: objectSDK.AttributeFileName, - // PathAttribute: pAttr, - Path: strings.Split(path, "/"), - Meta: meta, - BearerToken: bt, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.AddByPath(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to addByPath %w", err) - - cmd.Printf("Parent ID: %d\n", resp.GetBody().GetParentId()) - - nn := resp.GetBody().GetNodes() - if len(nn) == 0 { - common.PrintVerbose(cmd, "No new nodes were created") - return - } - - cmd.Println("Created nodes:") - for _, node := range resp.GetBody().GetNodes() { - cmd.Printf("\t%d\n", node) - } -} diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go deleted file mode 100644 index d71a94b98..000000000 --- a/cmd/frostfs-cli/modules/tree/client.go +++ /dev/null @@ -1,68 +0,0 @@ -package tree - -import ( - "context" - "crypto/tls" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" -) - -// _client returns grpc Tree service client. Should be removed -// after making Tree API public. -func _client() (tree.TreeServiceClient, error) { - var netAddr network.Address - - rpcEndpoint := viper.GetString(commonflags.RPC) - if rpcEndpoint == "" { - return nil, fmt.Errorf("%s is not defined", commonflags.RPC) - } - - err := netAddr.FromString(rpcEndpoint) - if err != nil { - return nil, err - } - - host, isTLS, err := client.ParseURI(netAddr.URIAddr()) - if err != nil { - return nil, err - } - - creds := insecure.NewCredentials() - if isTLS { - creds = credentials.NewTLS(&tls.Config{}) - } - - opts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - tracing.NewUnaryClientInterceptor(), - ), - grpc.WithChainStreamInterceptor( - tracing.NewStreamClientInterceptor(), - ), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - grpc.WithDisableServiceConfig(), - grpc.WithTransportCredentials(creds), - } - - cc, err := grpc.NewClient(host, opts...) 
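Note: _client above decides transport security from the endpoint URI via client.ParseURI and then dials with either TLS or plaintext credentials. A reduced sketch of that choice with a hand-rolled scheme check in place of the SDK parser (a simplification: real scheme handling lives in client.ParseURI):

    package main

    import (
        "crypto/tls"
        "fmt"
        "strings"

        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/credentials/insecure"
    )

    // credsFor picks transport credentials the way _client does: TLS for
    // a grpcs:// endpoint, plaintext otherwise.
    func credsFor(endpoint string) (credentials.TransportCredentials, string) {
        if host, ok := strings.CutPrefix(endpoint, "grpcs://"); ok {
            return credentials.NewTLS(&tls.Config{}), host
        }
        return insecure.NewCredentials(), strings.TrimPrefix(endpoint, "grpc://")
    }

    func main() {
        creds, host := credsFor("grpcs://node.example:8082")
        fmt.Println(creds.Info().SecurityProtocol, host) // tls node.example:8082
    }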
- return tree.NewTreeServiceClient(cc), err -} - -func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) { - if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 { - common.PrintVerbose(cmd, "Set request timeout to %s.", timeout) - return context.WithTimeout(cmd.Context(), timeout) - } - return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault) -} diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go deleted file mode 100644 index 210630e60..000000000 --- a/cmd/frostfs-cli/modules/tree/get_by_path.go +++ /dev/null @@ -1,102 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/spf13/cobra" -) - -var getByPathCmd = &cobra.Command{ - Use: "get-by-path", - Short: "Get a node by its path", - Run: getByPath, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initGetByPathCmd() { - commonflags.Init(getByPathCmd) - initCTID(getByPathCmd) - - ff := getByPathCmd.Flags() - - // tree service does not allow any attribute except - // the 'FileName' but that's a limitation of the - // current implementation, not the rule - // ff.String(pathAttributeFlagKey, "", "Path attribute") - ff.String(pathFlagKey, "", "Path to a node") - - ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node") -} - -func getByPath(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidRaw) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - latestOnly, _ := cmd.Flags().GetBool(latestOnlyFlagKey) - path, _ := cmd.Flags().GetString(pathFlagKey) - - var bt []byte - if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { - bt = t.Marshal() - } - - req := new(tree.GetNodeByPathRequest) - req.Body = &tree.GetNodeByPathRequest_Body{ - ContainerId: rawCID, - TreeId: tid, - PathAttribute: objectSDK.AttributeFileName, - // PathAttribute: pAttr, - Path: strings.Split(path, "/"), - LatestOnly: latestOnly, - AllAttributes: true, - BearerToken: bt, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.GetNodeByPath(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call getNodeByPath: %w", err) - - nn := resp.GetBody().GetNodes() - if len(nn) == 0 { - common.PrintVerbose(cmd, "The node is not found") - return - } - - for _, n := range nn { - cmd.Printf("%d:\n", n.GetNodeId()) - - cmd.Println("\tParent ID: ", n.GetParentId()) - cmd.Println("\tTimestamp: ", n.GetTimestamp()) - - cmd.Println("\tMeta pairs: ") - for _, kv := range n.GetMeta() { - cmd.Printf("\t\t%s: %s\n", kv.GetKey(), string(kv.GetValue())) 
- } - } -} diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go deleted file mode 100644 index 9d767ab3e..000000000 --- a/cmd/frostfs-cli/modules/tree/get_op_log.go +++ /dev/null @@ -1,91 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "errors" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var getOpLogCmd = &cobra.Command{ - Use: "get-op-log", - Short: "Get logged operations starting with some height", - Run: getOpLog, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initGetOpLogCmd() { - commonflags.Init(getOpLogCmd) - initCTID(getOpLogCmd) - - ff := getOpLogCmd.Flags() - ff.Uint64(heightFlagKey, 0, "Height to start with") - ff.Uint64(countFlagKey, 10, "Logged operations count") -} - -func getOpLog(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidRaw) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - height, _ := cmd.Flags().GetUint64(heightFlagKey) - count, _ := cmd.Flags().GetUint64(countFlagKey) - - req := &tree.GetOpLogRequest{ - Body: &tree.GetOpLogRequest_Body{ - ContainerId: rawCID, - TreeId: tid, - Height: height, - Count: count, - }, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.GetOpLog(ctx, req) - commonCmd.ExitOnErr(cmd, "get op log: %w", err) - - opLogResp, err := resp.Recv() - for ; err == nil; opLogResp, err = resp.Recv() { - o := opLogResp.GetBody().GetOperation() - - cmd.Println("Parent ID: ", o.GetParentId()) - - cmd.Println("\tChild ID: ", o.GetChildId()) - - m := &pilorama.Meta{} - err = m.FromBytes(o.GetMeta()) - commonCmd.ExitOnErr(cmd, "could not unmarshal meta: %w", err) - cmd.Printf("\tMeta:\n") - cmd.Printf("\t\tTime: %d\n", m.Time) - for _, item := range m.Items { - cmd.Printf("\t\t%s: %s\n", item.Key, item.Value) - } - } - if !errors.Is(err, io.EOF) { - commonCmd.ExitOnErr(cmd, "get op log response stream: %w", err) - } -} diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go deleted file mode 100644 index c581b8e26..000000000 --- a/cmd/frostfs-cli/modules/tree/healthcheck.go +++ /dev/null @@ -1,42 +0,0 @@ -package tree - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - "github.com/spf13/cobra" -) - -var healthcheckCmd = &cobra.Command{ - Use: "healthcheck", - Short: "Check tree 
service availability", - Run: healthcheck, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initHealthcheckCmd() { - commonflags.Init(healthcheckCmd) -} - -func healthcheck(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - req := &tree.HealthcheckRequest{ - Body: &tree.HealthcheckRequest_Body{}, - } - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - _, err = cli.Healthcheck(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call healthcheck: %w", err) - - common.PrintVerbose(cmd, "Successful healthcheck invocation.") -} diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go deleted file mode 100644 index ee1db2a79..000000000 --- a/cmd/frostfs-cli/modules/tree/list.go +++ /dev/null @@ -1,62 +0,0 @@ -package tree - -import ( - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var listCmd = &cobra.Command{ - Use: "list", - Short: "Get tree IDs", - Run: list, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initListCmd() { - commonflags.Init(listCmd) - - ff := listCmd.Flags() - ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = listCmd.MarkFlagRequired(commonflags.CIDFlag) -} - -func list(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidString) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - req := &tree.TreeListRequest{ - Body: &tree.TreeListRequest_Body{ - ContainerId: rawCID, - }, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.TreeList(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call treeList %w", err) - - for _, treeID := range resp.GetBody().GetIds() { - cmd.Println(treeID) - } -} diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go deleted file mode 100644 index 7a369bd02..000000000 --- a/cmd/frostfs-cli/modules/tree/move.go +++ /dev/null @@ -1,106 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "errors" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var moveCmd = &cobra.Command{ - Use: "move", - Short: "Move node", - Run: move, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - 
		commonflags.Bind(cmd)
-	},
-}
-
-func initMoveCmd() {
-	commonflags.Init(moveCmd)
-	initCTID(moveCmd)
-
-	ff := moveCmd.Flags()
-	ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
-	ff.Uint64(parentIDFlagKey, 0, "Parent ID.")
-
-	_ = moveCmd.MarkFlagRequired(nodeIDFlagKey)
-	_ = moveCmd.MarkFlagRequired(parentIDFlagKey)
-}
-
-func move(cmd *cobra.Command, _ []string) {
-	pk := key.GetOrGenerate(cmd)
-	cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
-
-	var cnr cid.ID
-	err := cnr.DecodeString(cidString)
-	commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
-
-	ctx, cancel := contextWithTimeout(cmd)
-	defer cancel()
-
-	cli, err := _client()
-	commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
-
-	rawCID := make([]byte, sha256.Size)
-	cnr.Encode(rawCID)
-
-	tid, _ := cmd.Flags().GetString(treeIDFlagKey)
-	pid, _ := cmd.Flags().GetUint64(parentIDFlagKey)
-	nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey)
-
-	var bt []byte
-	if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
-		bt = t.Marshal()
-	}
-
-	subTreeReq := &tree.GetSubTreeRequest{
-		Body: &tree.GetSubTreeRequest_Body{
-			ContainerId: rawCID,
-			TreeId:      tid,
-			RootId:      []uint64{nid},
-			Depth:       1,
-			BearerToken: bt,
-		},
-	}
-	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(subTreeReq, pk))
-	resp, err := cli.GetSubTree(ctx, subTreeReq)
-	commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
-
-	var meta []tree.KeyValue
-	subtreeResp, err := resp.Recv()
-	for ; err == nil; subtreeResp, err = resp.Recv() {
-		meta = subtreeResp.GetBody().GetMeta()
-	}
-	if !errors.Is(err, io.EOF) {
-		commonCmd.ExitOnErr(cmd, "failed to read getSubTree response stream: %w", err)
-	}
-	var metaErr error
-	if len(meta) == 0 {
-		metaErr = errors.New("no meta for given node ID")
-	}
-	commonCmd.ExitOnErr(cmd, "unexpected rpc call result: %w", metaErr)
-
-	req := &tree.MoveRequest{
-		Body: &tree.MoveRequest_Body{
-			ContainerId: rawCID,
-			TreeId:      tid,
-			ParentId:    pid,
-			NodeId:      nid,
-			Meta:        meta,
-		},
-	}
-
-	commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
-
-	_, err = cli.Move(ctx, req)
-	commonCmd.ExitOnErr(cmd, "failed to call move: %w", err)
-	common.PrintVerbose(cmd, "Successful move invocation.")
-}
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
deleted file mode 100644
index 3c532fe26..000000000
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package tree
-
-import (
-	"crypto/sha256"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"github.com/spf13/cobra"
-)
-
-var removeCmd = &cobra.Command{
-	Use:   "remove",
-	Short: "Remove node",
-	Run:   remove,
-	PersistentPreRun: func(cmd *cobra.Command, _ []string) {
-		commonflags.Bind(cmd)
-	},
-}
-
-func initRemoveCmd() {
-	commonflags.Init(removeCmd)
-	initCTID(removeCmd)
-
-	ff := removeCmd.Flags()
-	ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
-
-	_ = removeCmd.MarkFlagRequired(nodeIDFlagKey)
-}
-
-func remove(cmd *cobra.Command, _ []string) {
-	pk := key.GetOrGenerate(cmd)
-	cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
-
-	var cnr cid.ID
-	err :=
cnr.DecodeString(cidString) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - - nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey) - - var bt []byte - if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { - bt = t.Marshal() - } - req := &tree.RemoveRequest{ - Body: &tree.RemoveRequest_Body{ - ContainerId: rawCID, - TreeId: tid, - NodeId: nid, - BearerToken: bt, - }, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - _, err = cli.Remove(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call remove: %w", err) - common.PrintVerbose(cmd, "Successful remove invocation.") -} diff --git a/cmd/frostfs-cli/modules/tree/root.go b/cmd/frostfs-cli/modules/tree/root.go deleted file mode 100644 index 5a53c50d6..000000000 --- a/cmd/frostfs-cli/modules/tree/root.go +++ /dev/null @@ -1,65 +0,0 @@ -package tree - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" -) - -var Cmd = &cobra.Command{ - Use: "tree", - Short: "Operations with the Tree service", -} - -func init() { - Cmd.AddCommand(addCmd) - Cmd.AddCommand(getByPathCmd) - Cmd.AddCommand(addByPathCmd) - Cmd.AddCommand(listCmd) - Cmd.AddCommand(healthcheckCmd) - Cmd.AddCommand(moveCmd) - Cmd.AddCommand(removeCmd) - Cmd.AddCommand(getSubtreeCmd) - Cmd.AddCommand(getOpLogCmd) - - initAddCmd() - initGetByPathCmd() - initAddByPathCmd() - initListCmd() - initHealthcheckCmd() - initMoveCmd() - initRemoveCmd() - initGetSubtreeCmd() - initGetOpLogCmd() -} - -const ( - treeIDFlagKey = "tid" - parentIDFlagKey = "pid" - nodeIDFlagKey = "nid" - rootIDFlagKey = "root" - - metaFlagKey = "meta" - - pathFlagKey = "path" - - latestOnlyFlagKey = "latest" - - bearerFlagKey = "bearer" - - heightFlagKey = "height" - countFlagKey = "count" - depthFlagKey = "depth" - orderFlagKey = "ordered" -) - -func initCTID(cmd *cobra.Command) { - ff := cmd.Flags() - - ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = cmd.MarkFlagRequired(commonflags.CIDFlag) - - ff.String(treeIDFlagKey, "", "Tree ID") - _ = cmd.MarkFlagRequired(treeIDFlagKey) - - ff.String(bearerFlagKey, "", "Path to bearer token") -} diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go deleted file mode 100644 index c5f7ad401..000000000 --- a/cmd/frostfs-cli/modules/tree/subtree.go +++ /dev/null @@ -1,116 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "errors" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var getSubtreeCmd = &cobra.Command{ - Use: "get-subtree", - Short: "Get subtree", - Run: getSubTree, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, -} - -func initGetSubtreeCmd() { - commonflags.Init(getSubtreeCmd) - initCTID(getSubtreeCmd) - - ff := getSubtreeCmd.Flags() - 
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.") - ff.Uint32(depthFlagKey, 10, "Traversal depth.") - ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.") - - _ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag) - _ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey) -} - -func getSubTree(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag) - - var cnr cid.ID - err := cnr.DecodeString(cidString) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) - - ctx, cancel := contextWithTimeout(cmd) - defer cancel() - - cli, err := _client() - commonCmd.ExitOnErr(cmd, "failed to create client: %w", err) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) - - tid, _ := cmd.Flags().GetString(treeIDFlagKey) - - rid, _ := cmd.Flags().GetUint64(rootIDFlagKey) - - depth, _ := cmd.Flags().GetUint32(depthFlagKey) - - order, _ := cmd.Flags().GetBool(orderFlagKey) - - bodyOrder := tree.GetSubTreeRequest_Body_Order_None - if order { - bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc - } - - var bt []byte - if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil { - bt = t.Marshal() - } - - req := &tree.GetSubTreeRequest{ - Body: &tree.GetSubTreeRequest_Body{ - ContainerId: rawCID, - TreeId: tid, - RootId: []uint64{rid}, - Depth: depth, - BearerToken: bt, - OrderBy: &tree.GetSubTreeRequest_Body_Order{ - Direction: bodyOrder, - }, - }, - } - - commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk)) - - resp, err := cli.GetSubTree(ctx, req) - commonCmd.ExitOnErr(cmd, "failed to call getSubTree: %w", err) - - subtreeResp, err := resp.Recv() - for ; err == nil; subtreeResp, err = resp.Recv() { - b := subtreeResp.GetBody() - - if len(b.GetNodeId()) == 1 { - cmd.Printf("Node ID: %d\n", b.GetNodeId()) - cmd.Println("\tParent ID: ", b.GetParentId()) - cmd.Println("\tTimestamp: ", b.GetTimestamp()) - } else { - cmd.Printf("Node IDs: %v\n", b.GetNodeId()) - cmd.Println("\tParent IDs: ", b.GetParentId()) - cmd.Println("\tTimestamps: ", b.GetTimestamp()) - } - - if meta := b.GetMeta(); len(meta) > 0 { - cmd.Println("\tMeta pairs: ") - for _, kv := range meta { - cmd.Printf("\t\t%s: %s\n", kv.GetKey(), string(kv.GetValue())) - } - } - } - if !errors.Is(err, io.EOF) { - commonCmd.ExitOnErr(cmd, "rpc call: %w", err) - } -} diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go deleted file mode 100644 index 145dcc756..000000000 --- a/cmd/frostfs-cli/modules/util/acl.go +++ /dev/null @@ -1,325 +0,0 @@ -package util - -import ( - "bytes" - "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" - "strings" - "text/tabwriter" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "github.com/flynn-archive/go-shlex" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/olekukonko/tablewriter" - "github.com/spf13/cobra" -) - -// PrettyPrintTableBACL print basic ACL in table format. 
-func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) { - // Header - w := tabwriter.NewWriter(cmd.OutOrStdout(), 1, 4, 4, ' ', 0) - fmt.Fprintln(w, "\tRangeHASH\tRange\tSearch\tDelete\tPut\tHead\tGet") - // Bits - bits := []string{ - boolToString(bacl.Sticky()) + " " + boolToString(!bacl.Extendable()), - getRoleBitsForOperation(bacl, acl.OpObjectHash), getRoleBitsForOperation(bacl, acl.OpObjectRange), - getRoleBitsForOperation(bacl, acl.OpObjectSearch), getRoleBitsForOperation(bacl, acl.OpObjectDelete), - getRoleBitsForOperation(bacl, acl.OpObjectPut), getRoleBitsForOperation(bacl, acl.OpObjectHead), - getRoleBitsForOperation(bacl, acl.OpObjectGet), - } - fmt.Fprintln(w, strings.Join(bits, "\t")) - // Footer - footer := []string{"X F"} - for range 7 { - footer = append(footer, "U S O B") - } - fmt.Fprintln(w, strings.Join(footer, "\t")) - - w.Flush() - - cmd.Println(" X-Sticky F-Final U-User S-System O-Others B-Bearer") -} - -func getRoleBitsForOperation(bacl *acl.Basic, op acl.Op) string { - return boolToString(bacl.IsOpAllowed(op, acl.RoleOwner)) + " " + - boolToString(bacl.IsOpAllowed(op, acl.RoleContainer)) + " " + - boolToString(bacl.IsOpAllowed(op, acl.RoleOthers)) + " " + - boolToString(bacl.AllowedBearerRules(op)) -} - -func boolToString(b bool) string { - if b { - return "1" - } - return "0" -} - -// PrettyPrintTableEACL print extended ACL in table format. -func PrettyPrintTableEACL(cmd *cobra.Command, table *eacl.Table) { - out := tablewriter.NewWriter(cmd.OutOrStdout()) - out.SetHeader([]string{"Operation", "Action", "Filters", "Targets"}) - out.SetAlignment(tablewriter.ALIGN_CENTER) - out.SetRowLine(true) - - out.SetAutoWrapText(false) - - for _, r := range table.Records() { - out.Append([]string{ - r.Operation().String(), - r.Action().String(), - eaclFiltersToString(r.Filters()), - eaclTargetsToString(r.Targets()), - }) - } - - out.Render() -} - -func eaclTargetsToString(ts []eacl.Target) string { - b := bytes.NewBuffer(nil) - for _, t := range ts { - keysExists := len(t.BinaryKeys()) > 0 - switch t.Role() { - case eacl.RoleUser: - b.WriteString("User") - if keysExists { - b.WriteString(": ") - } - case eacl.RoleSystem: - b.WriteString("System") - if keysExists { - b.WriteString(": ") - } - case eacl.RoleOthers: - b.WriteString("Others") - if keysExists { - b.WriteString(": ") - } - default: - b.WriteString("Unknown") - if keysExists { - b.WriteString(": ") - } - } - - for i, pub := range t.BinaryKeys() { - if i != 0 { - b.WriteString(" ") - } - b.WriteString(hex.EncodeToString(pub)) - b.WriteString("\n") - } - } - - return b.String() -} - -func eaclFiltersToString(fs []eacl.Filter) string { - b := bytes.NewBuffer(nil) - tw := tabwriter.NewWriter(b, 0, 0, 1, ' ', 0) - - for _, f := range fs { - switch f.From() { - case eacl.HeaderFromObject: - _, _ = tw.Write([]byte("O:\t")) - case eacl.HeaderFromRequest: - _, _ = tw.Write([]byte("R:\t")) - case eacl.HeaderFromService: - _, _ = tw.Write([]byte("S:\t")) - default: - _, _ = tw.Write([]byte(" \t")) - } - - _, _ = tw.Write([]byte(f.Key())) - - switch f.Matcher() { - case eacl.MatchStringEqual: - _, _ = tw.Write([]byte("\t==\t")) - case eacl.MatchStringNotEqual: - _, _ = tw.Write([]byte("\t!=\t")) - case eacl.MatchUnknown: - } - - _, _ = tw.Write([]byte(f.Value() + "\t")) - _, _ = tw.Write([]byte("\n")) - } - - _ = tw.Flush() - - // To have nice output with tabwriter, we must append newline - // after the last line. Here we strip it to delete empty line - // in the final output. 
-	s := b.String()
-	if len(s) > 0 {
-		s = s[:len(s)-1]
-	}
-
-	return s
-}
-
-// ParseEACLRules parses eACL table.
-// Uses ParseEACLRule.
-func ParseEACLRules(table *eacl.Table, rules []string) error {
-	if len(rules) == 0 {
-		return errors.New("no extended ACL rules have been provided")
-	}
-
-	for _, ruleStr := range rules {
-		err := ParseEACLRule(table, ruleStr)
-		if err != nil {
-			return fmt.Errorf("can't create extended acl record from rule '%s': %v", ruleStr, err)
-		}
-	}
-	return nil
-}
-
-// ParseEACLRule parses eACL table from the following form:
-// <action> <operation> [<filter1> ...] [<target1> ...]
-//
-// Examples:
-// allow get req:X-Header=123 obj:Attr=value others:0xkey1,key2 system:key3 user:key4
-//
-//nolint:godot
-func ParseEACLRule(table *eacl.Table, rule string) error {
-	r, err := shlex.Split(rule)
-	if err != nil {
-		return fmt.Errorf("can't parse rule '%s': %v", rule, err)
-	}
-	return parseEACLTable(table, r)
-}
-
-func parseEACLTable(tb *eacl.Table, args []string) error {
-	if len(args) < 2 {
-		return errors.New("at least 2 arguments must be provided")
-	}
-
-	var action eacl.Action
-	if !action.FromString(strings.ToUpper(args[0])) {
-		return errors.New("invalid action (expected 'allow' or 'deny')")
-	}
-
-	ops, err := eaclOperationsFromString(args[1])
-	if err != nil {
-		return err
-	}
-
-	r, err := parseEACLRecord(args[2:])
-	if err != nil {
-		return err
-	}
-
-	r.SetAction(action)
-
-	for _, op := range ops {
-		r := *r
-		r.SetOperation(op)
-		tb.AddRecord(&r)
-	}
-
-	return nil
-}
-
-func parseEACLRecord(args []string) (*eacl.Record, error) {
-	r := new(eacl.Record)
-	for _, arg := range args {
-		before, after, found := strings.Cut(arg, ":")
-
-		switch prefix := strings.ToLower(before); prefix {
-		case "req", "obj": // filters
-			if !found {
-				return nil, fmt.Errorf("invalid filter or target: %s", arg)
-			}
-
-			var key, value string
-			var op eacl.Match
-			var f bool
-
-			key, value, f = strings.Cut(after, "!=")
-			if f {
-				op = eacl.MatchStringNotEqual
-			} else {
-				key, value, f = strings.Cut(after, "=")
-				if !f {
-					return nil, fmt.Errorf("invalid filter key-value pair: %s", after)
-				}
-				op = eacl.MatchStringEqual
-			}
-
-			typ := eacl.HeaderFromRequest
-			if before == "obj" {
-				typ = eacl.HeaderFromObject
-			}
-
-			r.AddFilter(typ, op, key, value)
-		case "others", "system", "user", "pubkey": // targets
-			var err error
-
-			var pubs []ecdsa.PublicKey
-			if found {
-				pubs, err = parseKeyList(after)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			var role eacl.Role
-			if prefix != "pubkey" {
-				role, err = eaclRoleFromString(prefix)
-				if err != nil {
-					return nil, err
-				}
-			}
-
-			eacl.AddFormedTarget(r, role, pubs...)
-
-		default:
-			return nil, fmt.Errorf("invalid prefix: %s", before)
-		}
-	}
-
-	return r, nil
-}
-
-// eaclRoleFromString parses eacl.Role from string.
-func eaclRoleFromString(s string) (eacl.Role, error) {
-	var r eacl.Role
-	if !r.FromString(strings.ToUpper(s)) {
-		return r, fmt.Errorf("unexpected role %s", s)
-	}
-
-	return r, nil
-}
-
-// parseKeyList parses list of hex-encoded public keys separated by comma.
-func parseKeyList(s string) ([]ecdsa.PublicKey, error) {
-	ss := strings.Split(s, ",")
-	pubs := make([]ecdsa.PublicKey, len(ss))
-	for i := range ss {
-		st := strings.TrimPrefix(ss[i], "0x")
-		pub, err := keys.NewPublicKeyFromString(st)
-		if err != nil {
-			return nil, fmt.Errorf("invalid public key '%s': %w", ss[i], err)
-		}
-
-		pubs[i] = ecdsa.PublicKey(*pub)
-	}
-
-	return pubs, nil
-}
-
-// eaclOperationsFromString parses list of eacl.Operation separated by comma.
-func eaclOperationsFromString(s string) ([]eacl.Operation, error) {
-	ss := strings.Split(s, ",")
-	ops := make([]eacl.Operation, len(ss))
-
-	for i := range ss {
-		if !ops[i].FromString(strings.ToUpper(ss[i])) {
-			return nil, fmt.Errorf("invalid operation: %s", ss[i])
-		}
-	}
-
-	return ops, nil
-}
diff --git a/cmd/frostfs-cli/modules/util/convert.go b/cmd/frostfs-cli/modules/util/convert.go
deleted file mode 100644
index 23a25be07..000000000
--- a/cmd/frostfs-cli/modules/util/convert.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package util
-
-import "github.com/spf13/cobra"
-
-var convertCmd = &cobra.Command{
-	Use:   "convert",
-	Short: "Convert representation of FrostFS structures",
-}
-
-func initConvertCmd() {
-	convertCmd.AddCommand(convertEACLCmd)
-
-	initConvertEACLCmd()
-}
diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go
deleted file mode 100644
index caa6dfcfe..000000000
--- a/cmd/frostfs-cli/modules/util/convert_eacl.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package util
-
-import (
-	"os"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-	"github.com/spf13/cobra"
-)
-
-const (
-	fromFlagStr = "from"
-	toFlagStr   = "to"
-	apeFlagStr  = "ape"
-)
-
-var convertEACLCmd = &cobra.Command{
-	Use:   "eacl",
-	Short: "Convert representation of extended ACL table",
-	Run:   convertEACLTable,
-}
-
-func initConvertEACLCmd() {
-	flags := convertEACLCmd.Flags()
-
-	flags.String(fromFlagStr, "", "File with JSON or binary encoded extended ACL table")
-	_ = convertEACLCmd.MarkFlagFilename(fromFlagStr)
-	_ = convertEACLCmd.MarkFlagRequired(fromFlagStr)
-
-	flags.String(toFlagStr, "", "File to dump extended ACL table (default: binary encoded)")
-	flags.Bool(commonflags.JSON, false, "Dump extended ACL table in JSON encoding")
-
-	flags.Bool(apeFlagStr, false, "Dump converted eACL table to APE chain format")
-
-	convertEACLCmd.MarkFlagsMutuallyExclusive(apeFlagStr, commonflags.JSON)
-}
-
-func convertEACLTable(cmd *cobra.Command, _ []string) {
-	pathFrom := cmd.Flag(fromFlagStr).Value.String()
-	to := cmd.Flag(toFlagStr).Value.String()
-	jsonFlag, _ := cmd.Flags().GetBool(commonflags.JSON)
-	apeFlag, _ := cmd.Flags().GetBool(apeFlagStr)
-
-	table := common.ReadEACL(cmd, pathFrom)
-
-	var data []byte
-	var err error
-
-	if apeFlag {
-		var ch *chain.Chain
-		ch, err = apeutil.ConvertEACLToAPE(table)
-		commonCmd.ExitOnErr(cmd, "convert eACL table to APE chain error: %w", err)
-		data = ch.Bytes()
-	} else if jsonFlag || len(to) == 0 {
-		data, err = table.MarshalJSON()
-		commonCmd.ExitOnErr(cmd, "can't JSON encode extended ACL table: %w", err)
-	} else {
-		data, err = table.Marshal()
-		commonCmd.ExitOnErr(cmd, "can't binary encode extended ACL table: %w", err)
-	}
-
-	if len(to) == 0 {
-		common.PrettyPrintJSON(cmd, table, "eACL")
-		return
-	}
-
-	err = os.WriteFile(to, data, 0o644)
-	commonCmd.ExitOnErr(cmd, "can't write extended ACL table to file: %w", err)
-
-	cmd.Printf("extended ACL table was successfully dumped to %s\n", to)
-}
diff --git a/cmd/frostfs-cli/modules/util/keyer.go b/cmd/frostfs-cli/modules/util/keyer.go
deleted file mode 100644
index ee2497348..000000000
--- a/cmd/frostfs-cli/modules/util/keyer.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package 
util - -import ( - "crypto/rand" - "errors" - "fmt" - "os" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/keyer" - "github.com/spf13/cobra" -) - -var keyerCmd = &cobra.Command{ - Use: "keyer", - Short: "Generate or print information about keys", - Run: processKeyer, -} - -var errKeyerSingleArgument = errors.New("pass only one argument at a time") - -func initKeyerCmd() { - keyerCmd.Flags().BoolP("generate", "g", false, "Generate new private key") - keyerCmd.Flags().Bool("hex", false, "Print all values in hex encoding") - keyerCmd.Flags().BoolP("uncompressed", "u", false, "Use uncompressed public key format") - keyerCmd.Flags().BoolP("multisig", "m", false, "Calculate multisig address from public keys") -} - -func processKeyer(cmd *cobra.Command, args []string) { - var ( - err error - - result = new(keyer.Dashboard) - generate, _ = cmd.Flags().GetBool("generate") - useHex, _ = cmd.Flags().GetBool("hex") - uncompressed, _ = cmd.Flags().GetBool("uncompressed") - multisig, _ = cmd.Flags().GetBool("multisig") - ) - - if multisig { - err = result.ParseMultiSig(args) - } else { - if len(args) > 1 { - commonCmd.ExitOnErr(cmd, "", errKeyerSingleArgument) - } - - var argument string - if len(args) > 0 { - argument = args[0] - } - - switch { - case generate: - err = keyerGenerate(argument, result) - case fileExists(argument): - err = keyerParseFile(argument, result) - default: - err = result.ParseString(argument) - } - } - - commonCmd.ExitOnErr(cmd, "", err) - - result.PrettyPrint(uncompressed, useHex) -} - -func keyerGenerate(filename string, d *keyer.Dashboard) error { - key := make([]byte, keyer.NeoPrivateKeySize) - - _, err := rand.Read(key) - if err != nil { - return fmt.Errorf("can't get random source: %w", err) - } - - err = d.ParseBinary(key) - if err != nil { - return fmt.Errorf("can't parse key: %w", err) - } - - if filename != "" { - return os.WriteFile(filename, key, 0o600) - } - - return nil -} - -func fileExists(filename string) bool { - info, err := os.Stat(filename) - return !os.IsNotExist(err) && !info.IsDir() -} - -func keyerParseFile(filename string, d *keyer.Dashboard) error { - data, err := os.ReadFile(filename) - if err != nil { - return fmt.Errorf("can't open %v file: %w", filename, err) - } - - return d.ParseBinary(data) -} diff --git a/cmd/frostfs-cli/modules/util/root.go b/cmd/frostfs-cli/modules/util/root.go deleted file mode 100644 index a909e6899..000000000 --- a/cmd/frostfs-cli/modules/util/root.go +++ /dev/null @@ -1,31 +0,0 @@ -package util - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var Cmd = &cobra.Command{ - Use: "util", - Short: "Utility operations", - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - flags := cmd.Flags() - - _ = viper.BindPFlag(commonflags.GenerateKey, flags.Lookup(commonflags.GenerateKey)) - _ = viper.BindPFlag(commonflags.WalletPath, flags.Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.Account, flags.Lookup(commonflags.Account)) - }, -} - -func init() { - Cmd.AddCommand( - signCmd, - convertCmd, - keyerCmd, - ) - - initSignCmd() - initConvertCmd() - initKeyerCmd() -} diff --git a/cmd/frostfs-cli/modules/util/sign.go b/cmd/frostfs-cli/modules/util/sign.go deleted file mode 100644 index e8cb086e0..000000000 --- a/cmd/frostfs-cli/modules/util/sign.go +++ /dev/null @@ -1,22 +0,0 @@ -package util - -import ( - 
"github.com/spf13/cobra" -) - -const ( - signFromFlag = "from" - signToFlag = "to" -) - -var signCmd = &cobra.Command{ - Use: "sign", - Short: "Sign FrostFS structure", -} - -func initSignCmd() { - signCmd.AddCommand(signBearerCmd, signSessionCmd) - - initSignBearerCmd() - initSignSessionCmd() -} diff --git a/cmd/frostfs-cli/modules/util/sign_bearer.go b/cmd/frostfs-cli/modules/util/sign_bearer.go deleted file mode 100644 index 991216958..000000000 --- a/cmd/frostfs-cli/modules/util/sign_bearer.go +++ /dev/null @@ -1,63 +0,0 @@ -package util - -import ( - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/spf13/cobra" -) - -const ( - signBearerJSONFlag = commonflags.JSON -) - -var signBearerCmd = &cobra.Command{ - Use: "bearer-token", - Short: "Sign bearer token to use it in requests", - Run: signBearerToken, -} - -func initSignBearerCmd() { - commonflags.InitWithoutRPC(signBearerCmd) - - flags := signBearerCmd.Flags() - - flags.String(signFromFlag, "", "File with JSON or binary encoded bearer token to sign") - _ = signBearerCmd.MarkFlagFilename(signFromFlag) - _ = signBearerCmd.MarkFlagRequired(signFromFlag) - - flags.String(signToFlag, "", "File to dump signed bearer token (default: binary encoded)") - flags.Bool(signBearerJSONFlag, false, "Dump bearer token in JSON encoding") -} - -func signBearerToken(cmd *cobra.Command, _ []string) { - btok := common.ReadBearerToken(cmd, signFromFlag) - pk := key.GetOrGenerate(cmd) - - err := btok.Sign(*pk) - commonCmd.ExitOnErr(cmd, "", err) - - to := cmd.Flag(signToFlag).Value.String() - jsonFlag, _ := cmd.Flags().GetBool(signBearerJSONFlag) - - var data []byte - if jsonFlag || len(to) == 0 { - data, err = btok.MarshalJSON() - commonCmd.ExitOnErr(cmd, "can't JSON encode bearer token: %w", err) - } else { - data = btok.Marshal() - } - - if len(to) == 0 { - common.PrettyPrintJSON(cmd, btok, "bearer token") - return - } - - err = os.WriteFile(to, data, 0o644) - commonCmd.ExitOnErr(cmd, "can't write signed bearer token to file: %w", err) - - cmd.Printf("signed bearer token was successfully dumped to %s\n", to) -} diff --git a/cmd/frostfs-cli/modules/util/sign_session.go b/cmd/frostfs-cli/modules/util/sign_session.go deleted file mode 100644 index ba76678dc..000000000 --- a/cmd/frostfs-cli/modules/util/sign_session.go +++ /dev/null @@ -1,85 +0,0 @@ -package util - -import ( - "crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "github.com/spf13/cobra" -) - -var signSessionCmd = &cobra.Command{ - Use: "session-token", - Short: "Sign session token to use it in requests", - Run: signSessionToken, -} - -func initSignSessionCmd() { - commonflags.InitWithoutRPC(signSessionCmd) - - flags := signSessionCmd.Flags() - - flags.String(signFromFlag, "", "File with JSON encoded session token to sign") - _ = signSessionCmd.MarkFlagFilename(signFromFlag) - _ = signSessionCmd.MarkFlagRequired(signFromFlag) - - 
flags.String(signToFlag, "", "File to save signed session token (optional)") -} - -func signSessionToken(cmd *cobra.Command, _ []string) { - fPath, err := cmd.Flags().GetString(signFromFlag) - commonCmd.ExitOnErr(cmd, "", err) - - if fPath == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("missing session token flag")) - } - - type iTokenSession interface { - json.Marshaler - common.BinaryOrJSON - Sign(ecdsa.PrivateKey) error - } - var errLast error - var stok iTokenSession - - for _, el := range [...]iTokenSession{ - new(session.Object), - new(session.Container), - } { - errLast = common.ReadBinaryOrJSON(cmd, el, fPath) - if errLast == nil { - stok = el - break - } - } - - commonCmd.ExitOnErr(cmd, "decode session: %v", errLast) - - pk := key.GetOrGenerate(cmd) - - err = stok.Sign(*pk) - commonCmd.ExitOnErr(cmd, "can't sign token: %w", err) - - data, err := stok.MarshalJSON() - commonCmd.ExitOnErr(cmd, "can't encode session token: %w", err) - - to := cmd.Flag(signToFlag).Value.String() - if len(to) == 0 { - common.PrettyPrintJSON(cmd, stok, "session token") - return - } - - err = os.WriteFile(to, data, 0o644) - if err != nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't write signed session token to %s: %w", to, err)) - } - - cmd.Printf("signed session token saved in %s\n", to) -} diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go deleted file mode 100644 index 13a747ba6..000000000 --- a/cmd/frostfs-ir/config.go +++ /dev/null @@ -1,131 +0,0 @@ -package main - -import ( - "context" - "os" - "os/signal" - "strconv" - "syscall" - - configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/spf13/cast" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -func newConfig() (*viper.Viper, error) { - var err error - dv := viper.New() - - defaultConfiguration(dv) - - _, err = configViper.CreateViper(configViper.WithConfigFile(*configFile), - configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix), - configViper.WithViper(dv)) - if err != nil { - return nil, err - } - - return dv, err -} - -func reloadConfig() error { - err := configViper.ReloadViper(configViper.WithConfigFile(*configFile), - configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix), - configViper.WithViper(cfg)) - if err != nil { - return err - } - cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) - audit.Store(cfg.GetBool("audit.enabled")) - var logPrm logger.Prm - err = logPrm.SetLevelString(cfg.GetString("logger.level")) - if err != nil { - return err - } - err = logPrm.SetTags(loggerTags()) - if err != nil { - return err - } - logger.UpdateLevelForTags(logPrm) - - return nil -} - -func loggerTags() [][]string { - var res [][]string - for i := 0; ; i++ { - var item []string - index := strconv.FormatInt(int64(i), 10) - names := cast.ToString(cfg.Get("logger.tags." 
+ index + ".names")) - if names == "" { - break - } - item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) - res = append(res, item) - } - return res -} - -func watchForSignal(ctx context.Context, cancel func()) { - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) - - sighupCh := make(chan os.Signal, 1) - signal.Notify(sighupCh, syscall.SIGHUP) - - for { - select { - // signals causing application to shut down should have priority over - // reconfiguration signal - case <-ch: - log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - cancel() - shutdown(ctx) - log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) - return - case err := <-intErr: // internal application error - log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) - cancel() - shutdown(ctx) - return - default: - // block until any signal is receieved - select { - case <-ch: - log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - cancel() - shutdown(ctx) - log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) - return - case err := <-intErr: // internal application error - log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) - cancel() - shutdown(ctx) - return - case <-sighupCh: - log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) - if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { - log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) - break - } - err := reloadConfig() - if err != nil { - log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) - } - pprofCmp.reload(ctx) - metricsCmp.reload(ctx) - log.Info(ctx, logs.FrostFSIRReloadExtraWallets) - err = innerRing.SetExtraWallets(cfg) - if err != nil { - log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) - } - innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) - log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) - } - } - } -} diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go deleted file mode 100644 index 9b775252f..000000000 --- a/cmd/frostfs-ir/defaults.go +++ /dev/null @@ -1,143 +0,0 @@ -package main - -import ( - "time" - - "github.com/spf13/viper" -) - -func defaultConfiguration(cfg *viper.Viper) { - cfg.SetDefault("logger.level", "info") - cfg.SetDefault("logger.destination", "stdout") - cfg.SetDefault("logger.timestamp", false) - - setPprofDefaults(cfg) - - setPrometheusDefaults(cfg) - - cfg.SetDefault("without_mainnet", false) - - cfg.SetDefault("node.persistent_state.path", ".frostfs-ir-state") - - setMorphDefaults(cfg) - - setMainNetDefaults(cfg) - - setWalletDefaults(cfg) - - setContractsDefaults(cfg) - - setTimersDefaults(cfg) - - setWorkersDefaults(cfg) - - setNetmapCleanerDefaults(cfg) - - setEmitDefaults(cfg) - - cfg.SetDefault("indexer.cache_timeout", 15*time.Second) - - cfg.SetDefault("locode.db.path", "") - - setFeeDefaults(cfg) - - setControlDefaults(cfg) - - cfg.SetDefault("governance.disable", false) - - cfg.SetDefault("node.kludge_compatibility_mode", false) - - cfg.SetDefault("audit.enabled", false) - - setMultinetDefaults(cfg) -} - -func setControlDefaults(cfg *viper.Viper) { - cfg.SetDefault("control.authorized_keys", []string{}) - cfg.SetDefault("control.grpc.endpoint", "") -} - -func setFeeDefaults(cfg *viper.Viper) { - // extra fee values for working mode without notary contract - 
cfg.SetDefault("fee.main_chain", 5000_0000) // 0.5 Fixed8 - cfg.SetDefault("fee.side_chain", 2_0000_0000) // 2.0 Fixed8 -} - -func setEmitDefaults(cfg *viper.Viper) { - cfg.SetDefault("emit.storage.amount", 0) - cfg.SetDefault("emit.mint.cache_size", 1000) - cfg.SetDefault("emit.mint.threshold", 1) - cfg.SetDefault("emit.mint.value", 20000000) // 0.2 Fixed8 - cfg.SetDefault("emit.gas.balance_threshold", 0) - cfg.SetDefault("emit.extra_wallets", nil) -} - -func setPprofDefaults(cfg *viper.Viper) { - cfg.SetDefault("pprof.address", "localhost:6060") - cfg.SetDefault("pprof.shutdown_timeout", "30s") -} - -func setPrometheusDefaults(cfg *viper.Viper) { - cfg.SetDefault("prometheus.address", "localhost:9090") - cfg.SetDefault("prometheus.shutdown_timeout", "30s") -} - -func setNetmapCleanerDefaults(cfg *viper.Viper) { - cfg.SetDefault("netmap_cleaner.enabled", true) - cfg.SetDefault("netmap_cleaner.threshold", 3) -} - -func setWorkersDefaults(cfg *viper.Viper) { - cfg.SetDefault("workers.netmap", "10") - cfg.SetDefault("workers.balance", "10") - cfg.SetDefault("workers.frostfs", "10") - cfg.SetDefault("workers.container", "10") - cfg.SetDefault("workers.alphabet", "10") -} - -func setTimersDefaults(cfg *viper.Viper) { - cfg.SetDefault("timers.emit", "0") - cfg.SetDefault("timers.stop_estimation.mul", 1) - cfg.SetDefault("timers.stop_estimation.div", 4) - cfg.SetDefault("timers.collect_basic_income.mul", 1) - cfg.SetDefault("timers.collect_basic_income.div", 2) - cfg.SetDefault("timers.distribute_basic_income.mul", 3) - cfg.SetDefault("timers.distribute_basic_income.div", 4) -} - -func setContractsDefaults(cfg *viper.Viper) { - cfg.SetDefault("contracts.netmap", "") - cfg.SetDefault("contracts.frostfs", "") - cfg.SetDefault("contracts.balance", "") - cfg.SetDefault("contracts.container", "") - cfg.SetDefault("contracts.proxy", "") - cfg.SetDefault("contracts.processing", "") - cfg.SetDefault("contracts.proxy", "") -} - -func setWalletDefaults(cfg *viper.Viper) { - cfg.SetDefault("wallet.path", "") // inner ring node NEP-6 wallet - cfg.SetDefault("wallet.address", "") // account address - cfg.SetDefault("wallet.password", "") // password -} - -func setMainNetDefaults(cfg *viper.Viper) { - cfg.SetDefault("mainnet.endpoint.client", []string{}) - cfg.SetDefault("mainnet.dial_timeout", 15*time.Second) - cfg.SetDefault("mainnet.switch_interval", 2*time.Minute) -} - -func setMorphDefaults(cfg *viper.Viper) { - cfg.SetDefault("morph.endpoint.client", []string{}) - cfg.SetDefault("morph.dial_timeout", 15*time.Second) - cfg.SetDefault("morph.validators", []string{}) - cfg.SetDefault("morph.switch_interval", 2*time.Minute) -} - -func setMultinetDefaults(cfg *viper.Viper) { - cfg.SetDefault("multinet.enabled", false) - cfg.SetDefault("multinet.balancer", "") - cfg.SetDefault("multinet.restrict", false) - cfg.SetDefault("multinet.fallback_delay", "0s") - cfg.SetDefault("multinet.subnets", "") -} diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go deleted file mode 100644 index dd70fc91c..000000000 --- a/cmd/frostfs-ir/httpcomponent.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "context" - "net/http" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" - "go.uber.org/zap" -) - -type httpComponent struct { - srv *httputil.Server - address string - name string - handler http.Handler - shutdownDur time.Duration - enabled bool -} - -const ( - enabledKeyPostfix = ".enabled" - 
addressKeyPostfix = ".address" - shutdownTimeoutKeyPostfix = ".shutdown_timeout" -) - -func (c *httpComponent) init(ctx context.Context) { - log.Info(ctx, "init "+c.name) - c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) - c.address = cfg.GetString(c.name + addressKeyPostfix) - c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) - - if c.enabled { - c.srv = httputil.New( - httputil.HTTPSrvPrm{ - Address: c.address, - Handler: c.handler, - }, - httputil.WithShutdownTimeout(c.shutdownDur), - ) - } else { - log.Info(ctx, c.name+" is disabled, skip") - c.srv = nil - } -} - -func (c *httpComponent) start(ctx context.Context) { - if c.srv != nil { - log.Info(ctx, "start "+c.name) - wg.Add(1) - go func() { - defer wg.Done() - exitErr(c.srv.Serve()) - }() - } -} - -func (c *httpComponent) shutdown(ctx context.Context) error { - if c.srv != nil { - log.Info(ctx, "shutdown "+c.name) - return c.srv.Shutdown(ctx) - } - return nil -} - -func (c *httpComponent) needReload() bool { - enabled := cfg.GetBool(c.name + enabledKeyPostfix) - address := cfg.GetString(c.name + addressKeyPostfix) - dur := cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) - return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur) -} - -func (c *httpComponent) reload(ctx context.Context) { - log.Info(ctx, "reload "+c.name) - if c.needReload() { - log.Info(ctx, c.name+" config updated") - if err := c.shutdown(ctx); err != nil { - log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.Error(err), - ) - } else { - c.init(ctx) - c.start(ctx) - } - } -} diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go deleted file mode 100644 index 799feb784..000000000 --- a/cmd/frostfs-ir/main.go +++ /dev/null @@ -1,138 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "os" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - irMetrics "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -const ( - // ErrorReturnCode returns when application crashed at initialization stage. - ErrorReturnCode = 1 - - // SuccessReturnCode returns when application closed without panic. 
- SuccessReturnCode = 0 - - EnvPrefix = "FROSTFS_IR" -) - -var ( - wg = new(sync.WaitGroup) - intErr = make(chan error) // internal inner ring errors - innerRing *innerring.Server - pprofCmp *pprofComponent - metricsCmp *httpComponent - log *logger.Logger - cfg *viper.Viper - configFile *string - configDir *string - cmode = &atomic.Bool{} - audit = &atomic.Bool{} -) - -func exitErr(err error) { - if err != nil { - fmt.Println(err) - os.Exit(ErrorReturnCode) - } -} - -func main() { - configFile = flag.String("config", "", "path to config") - configDir = flag.String("config-dir", "", "path to config directory") - versionFlag := flag.Bool("version", false, "frostfs-ir node version") - flag.Parse() - - if *versionFlag { - fmt.Print(misc.BuildInfo("FrostFS Inner Ring node")) - - os.Exit(SuccessReturnCode) - } - - var err error - cfg, err = newConfig() - exitErr(err) - - cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) - - metrics := irMetrics.NewInnerRingMetrics() - - var logPrm logger.Prm - err = logPrm.SetLevelString( - cfg.GetString("logger.level"), - ) - exitErr(err) - err = logPrm.SetDestination( - cfg.GetString("logger.destination"), - ) - exitErr(err) - logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() - logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") - err = logPrm.SetTags(loggerTags()) - exitErr(err) - - log, err = logger.NewLogger(logPrm) - exitErr(err) - - logger.UpdateLevelForTags(logPrm) - - ctx, cancel := context.WithCancel(context.Background()) - - pprofCmp = newPprofComponent() - pprofCmp.init(ctx) - - metricsCmp = newMetricsComponent() - metricsCmp.init(ctx) - audit.Store(cfg.GetBool("audit.enabled")) - - innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit) - exitErr(err) - - pprofCmp.start(ctx) - metricsCmp.start(ctx) - - // start inner ring - err = innerRing.Start(ctx, intErr) - exitErr(err) - - log.Info(ctx, logs.CommonApplicationStarted, - zap.String("version", misc.Version)) - - watchForSignal(ctx, cancel) - - <-ctx.Done() // graceful shutdown - log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop) - wg.Wait() - - log.Info(ctx, logs.FrostFSIRApplicationStopped) -} - -func shutdown(ctx context.Context) { - innerRing.Stop(ctx) - if err := metricsCmp.shutdown(ctx); err != nil { - log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.Error(err), - ) - } - if err := pprofCmp.shutdown(ctx); err != nil { - log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.Error(err), - ) - } - - if err := sdnotify.ClearStatus(); err != nil { - log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) - } -} diff --git a/cmd/frostfs-ir/metrics.go b/cmd/frostfs-ir/metrics.go deleted file mode 100644 index dd982b780..000000000 --- a/cmd/frostfs-ir/metrics.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" -) - -func newMetricsComponent() *httpComponent { - return &httpComponent{ - name: "prometheus", - handler: metrics.Handler(), - } -} diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go deleted file mode 100644 index 2aebcde7f..000000000 --- a/cmd/frostfs-ir/pprof.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "context" - "runtime" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" - "go.uber.org/zap" -) - -type pprofComponent struct { - httpComponent - blockRate int - mutexRate int -} - -const ( - pprofBlockRateKey = 
"pprof.block_rate" - pprofMutexRateKey = "pprof.mutex_rate" -) - -func newPprofComponent() *pprofComponent { - return &pprofComponent{ - httpComponent: httpComponent{ - name: "pprof", - handler: httputil.Handler(), - }, - } -} - -func (c *pprofComponent) init(ctx context.Context) { - c.httpComponent.init(ctx) - - if c.enabled { - c.blockRate = cfg.GetInt(pprofBlockRateKey) - c.mutexRate = cfg.GetInt(pprofMutexRateKey) - runtime.SetBlockProfileRate(c.blockRate) - runtime.SetMutexProfileFraction(c.mutexRate) - } else { - c.blockRate = 0 - c.mutexRate = 0 - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(0) - } -} - -func (c *pprofComponent) needReload() bool { - blockRate := cfg.GetInt(pprofBlockRateKey) - mutexRate := cfg.GetInt(pprofMutexRateKey) - return c.httpComponent.needReload() || - c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate) -} - -func (c *pprofComponent) reload(ctx context.Context) { - log.Info(ctx, "reload "+c.name) - if c.needReload() { - log.Info(ctx, c.name+" config updated") - if err := c.shutdown(ctx); err != nil { - log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.Error(err)) - return - } - - c.init(ctx) - c.start(ctx) - } -} diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go deleted file mode 100644 index e7e2c0769..000000000 --- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go +++ /dev/null @@ -1,48 +0,0 @@ -package blobovnicza - -import ( - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var inspectCMD = &cobra.Command{ - Use: "inspect", - Short: "Object inspection", - Long: `Inspect specific object in a blobovnicza.`, - Run: inspectFunc, -} - -func init() { - common.AddAddressFlag(inspectCMD, &vAddress) - common.AddComponentPathFlag(inspectCMD, &vPath) - common.AddOutputFileFlag(inspectCMD, &vOut) -} - -func inspectFunc(cmd *cobra.Command, _ []string) { - var addr oid.Address - - err := addr.DecodeString(vAddress) - common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) - - blz := openBlobovnicza(cmd) - defer blz.Close(cmd.Context()) - - var prm blobovnicza.GetPrm - prm.SetAddress(addr) - - res, err := blz.Get(cmd.Context(), prm) - common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err)) - - data := res.Object() - - var o objectSDK.Object - common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", - o.Unmarshal(data)), - ) - - common.PrintObjectHeader(cmd, o) - common.WriteObjectToFile(cmd, vOut, data) -} diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go deleted file mode 100644 index d41a15bcf..000000000 --- a/cmd/frostfs-lens/internal/blobovnicza/list.go +++ /dev/null @@ -1,39 +0,0 @@ -package blobovnicza - -import ( - "context" - "fmt" - "io" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var listCMD = &cobra.Command{ - Use: "list", - Short: "Object listing", - Long: `List all objects stored in a blobovnicza.`, - Run: listFunc, -} - -func init() { - 
common.AddComponentPathFlag(listCMD, &vPath) -} - -func listFunc(cmd *cobra.Command, _ []string) { - // other targets can be supported - w := cmd.OutOrStderr() - - wAddr := func(addr oid.Address) error { - _, err := io.WriteString(w, fmt.Sprintf("%s\n", addr)) - return err - } - - blz := openBlobovnicza(cmd) - defer blz.Close(cmd.Context()) - - err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr) - common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err)) -} diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go deleted file mode 100644 index 2819981d6..000000000 --- a/cmd/frostfs-lens/internal/blobovnicza/root.go +++ /dev/null @@ -1,33 +0,0 @@ -package blobovnicza - -import ( - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "github.com/spf13/cobra" -) - -var ( - vAddress string - vPath string - vOut string -) - -// Root contains `blobovnicza` command definition. -var Root = &cobra.Command{ - Use: "blobovnicza", - Short: "Operations with a blobovnicza", -} - -func init() { - Root.AddCommand(listCMD, inspectCMD, tuiCMD) -} - -func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza { - blz := blobovnicza.New( - blobovnicza.WithPath(vPath), - blobovnicza.WithReadOnly(true), - ) - common.ExitOnErr(cmd, blz.Open(cmd.Context())) - - return blz -} diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go deleted file mode 100644 index 4aa281616..000000000 --- a/cmd/frostfs-lens/internal/blobovnicza/tui.go +++ /dev/null @@ -1,68 +0,0 @@ -package blobovnicza - -import ( - "context" - "fmt" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" - "github.com/rivo/tview" - "github.com/spf13/cobra" -) - -var tuiCMD = &cobra.Command{ - Use: "explore", - Short: "Blobovnicza exploration with a terminal UI", - Long: `Launch a terminal UI to explore blobovnicza and search for data. 
- -Available search filters: -- cid CID -- oid OID -- addr CID/OID -`, - Run: tuiFunc, -} - -var initialPrompt string - -func init() { - common.AddComponentPathFlag(tuiCMD, &vPath) - - tuiCMD.Flags().StringVar( - &initialPrompt, - "filter", - "", - "Filter prompt to start with, format 'tag:value [+ tag:value]...'", - ) -} - -func tuiFunc(cmd *cobra.Command, _ []string) { - common.ExitOnErr(cmd, runTUI(cmd)) -} - -func runTUI(cmd *cobra.Command) error { - db, err := tui.OpenDB(vPath, false) - if err != nil { - return fmt.Errorf("couldn't open database: %w", err) - } - defer db.Close() - - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() - - app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil) - - _ = ui.AddFilter("cid", tui.CIDParser, "CID") - _ = ui.AddFilter("oid", tui.OIDParser, "OID") - _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") - - err = ui.WithPrompt(initialPrompt) - if err != nil { - return fmt.Errorf("invalid filter prompt: %w", err) - } - - app.SetRoot(ui, true).SetFocus(ui) - return app.Run() -} diff --git a/cmd/frostfs-lens/internal/errors.go b/cmd/frostfs-lens/internal/errors.go deleted file mode 100644 index 536ba2031..000000000 --- a/cmd/frostfs-lens/internal/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -package common - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" -) - -// Errf returns formatted error in errFmt format if err is not nil. -func Errf(errFmt string, err error) error { - if err == nil { - return nil - } - - return fmt.Errorf(errFmt, err) -} - -// ExitOnErr calls exitOnErrCode with code 1. -func ExitOnErr(cmd *cobra.Command, err error) { - exitOnErrCode(cmd, err, 1) -} - -// exitOnErrCode prints error via cmd and calls os.Exit with passed exit code. -// Does nothing if err is nil. -func exitOnErrCode(cmd *cobra.Command, err error, code int) { - if err != nil { - cmd.PrintErrln(err) - os.Exit(code) - } -} diff --git a/cmd/frostfs-lens/internal/flags.go b/cmd/frostfs-lens/internal/flags.go deleted file mode 100644 index 8a987a2d4..000000000 --- a/cmd/frostfs-lens/internal/flags.go +++ /dev/null @@ -1,35 +0,0 @@ -package common - -import ( - "github.com/spf13/cobra" -) - -const ( - flagAddress = "address" - flagEnginePath = "path" - flagOutFile = "out" -) - -// AddAddressFlag adds the address flag to the passed cobra command. -func AddAddressFlag(cmd *cobra.Command, v *string) { - cmd.Flags().StringVar(v, flagAddress, "", "Object address") - _ = cmd.MarkFlagRequired(flagAddress) -} - -// AddComponentPathFlag adds the path-to-component flag to the -// passed cobra command. -func AddComponentPathFlag(cmd *cobra.Command, v *string) { - cmd.Flags().StringVar(v, flagEnginePath, "", - "Path to storage engine component", - ) - _ = cmd.MarkFlagFilename(flagEnginePath) - _ = cmd.MarkFlagRequired(flagEnginePath) -} - -// AddOutputFileFlag adds the output file flag to the passed cobra -// command. 
-func AddOutputFileFlag(cmd *cobra.Command, v *string) { - cmd.Flags().StringVar(v, flagOutFile, "", - "File to save object payload") - _ = cmd.MarkFlagFilename(flagOutFile) -} diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go deleted file mode 100644 index f436343c7..000000000 --- a/cmd/frostfs-lens/internal/meta/inspect.go +++ /dev/null @@ -1,74 +0,0 @@ -package meta - -import ( - "errors" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var inspectCMD = &cobra.Command{ - Use: "inspect", - Short: "Object inspection", - Long: `Inspect specific object in a metabase.`, - Run: inspectFunc, -} - -func init() { - common.AddAddressFlag(inspectCMD, &vAddress) - common.AddComponentPathFlag(inspectCMD, &vPath) -} - -func inspectFunc(cmd *cobra.Command, _ []string) { - var addr oid.Address - - err := addr.DecodeString(vAddress) - common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) - - db := openMeta(cmd) - defer db.Close(cmd.Context()) - - storageID := meta.StorageIDPrm{} - storageID.SetAddress(addr) - - resStorageID, err := db.StorageID(cmd.Context(), storageID) - common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err)) - - if id := resStorageID.StorageID(); id != nil { - cmd.Printf("Object storageID: %s\n\n", blobovniczatree.NewIDFromBytes(id).Path()) - } else { - cmd.Printf("Object does not contain storageID\n\n") - } - - prm := meta.GetPrm{} - prm.SetAddress(addr) - prm.SetRaw(true) - - siErr := new(objectSDK.SplitInfoError) - - res, err := db.Get(cmd.Context(), prm) - if errors.As(err, &siErr) { - link, linkSet := siErr.SplitInfo().Link() - last, lastSet := siErr.SplitInfo().LastPart() - - cmd.Println("Object is split") - cmd.Println("\tSplitID:", siErr.SplitInfo().SplitID().String()) - - if linkSet { - cmd.Println("\tLink:", link) - } - if lastSet { - cmd.Println("\tLast:", last) - } - - return - } - common.ExitOnErr(cmd, common.Errf("could not get object: %w", err)) - - common.PrintObjectHeader(cmd, *res.Header()) -}
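inspectFunc leans on the Errf and ExitOnErr helpers from internal/errors.go for every failure path: Errf wraps an error in a format string only when it is non-nil, so ExitOnErr(cmd, Errf(...)) exits with code 1 on failure and falls through silently on success. A minimal, self-contained sketch of the same pattern; the "demo" command and the lower-case helper names are illustrative, not part of frostfs-lens:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// errf mirrors common.Errf: wrap only non-nil errors.
func errf(format string, err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf(format, err)
}

// exitOnErr mirrors common.ExitOnErr: print via the command, then exit 1.
func exitOnErr(cmd *cobra.Command, err error) {
	if err != nil {
		cmd.PrintErrln(err)
		os.Exit(1)
	}
}

func main() {
	cmd := &cobra.Command{
		Use: "demo", // hypothetical command
		Run: func(cmd *cobra.Command, _ []string) {
			// Prints "step failed: boom" and exits with code 1;
			// with a nil error this line would be a no-op.
			exitOnErr(cmd, errf("step failed: %w", errors.New("boom")))
		},
	}
	_ = cmd.Execute()
}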
diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go deleted file mode 100644 index 6b27a232f..000000000 --- a/cmd/frostfs-lens/internal/meta/list-garbage.go +++ /dev/null @@ -1,33 +0,0 @@ -package meta - -import ( - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "github.com/spf13/cobra" -) - -var listGarbageCMD = &cobra.Command{ - Use: "list-garbage", - Short: "Garbage listing", - Long: `List all the objects that have received GC Mark.`, - Run: listGarbageFunc, -} - -func init() { - common.AddComponentPathFlag(listGarbageCMD, &vPath) -} - -func listGarbageFunc(cmd *cobra.Command, _ []string) { - db := openMeta(cmd) - defer db.Close(cmd.Context()) - - var garbPrm meta.GarbageIterationPrm - garbPrm.SetHandler( - func(garbageObject meta.GarbageObject) error { - cmd.Println(garbageObject.Address().EncodeToString()) - return nil - }) - - err := db.IterateOverGarbage(cmd.Context(), garbPrm) - common.ExitOnErr(cmd, common.Errf("could not iterate over garbage bucket: %w", err)) -} diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go deleted file mode 100644 index 45642e74b..000000000 --- a/cmd/frostfs-lens/internal/meta/list-graveyard.go +++ /dev/null @@ -1,38 +0,0 @@ -package meta - -import ( - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "github.com/spf13/cobra" -) - -var listGraveyardCMD = &cobra.Command{ - Use: "list-graveyard", - Short: "Graveyard listing", - Long: `List all the objects that have been covered with a tombstone.`, - Run: listGraveyardFunc, -} - -func init() { - common.AddComponentPathFlag(listGraveyardCMD, &vPath) -} - -func listGraveyardFunc(cmd *cobra.Command, _ []string) { - db := openMeta(cmd) - defer db.Close(cmd.Context()) - - var gravePrm meta.GraveyardIterationPrm - gravePrm.SetHandler( - func(tsObj meta.TombstonedObject) error { - cmd.Printf( - "Object: %s\nTS: %s\n", - tsObj.Address().EncodeToString(), - tsObj.Tombstone().EncodeToString(), - ) - - return nil - }) - - err := db.IterateOverGraveyard(cmd.Context(), gravePrm) - common.ExitOnErr(cmd, common.Errf("could not iterate over graveyard bucket: %w", err)) -} diff --git a/cmd/frostfs-lens/internal/meta/root.go b/cmd/frostfs-lens/internal/meta/root.go deleted file mode 100644 index 351d1ce80..000000000 --- a/cmd/frostfs-lens/internal/meta/root.go +++ /dev/null @@ -1,51 +0,0 @@ -package meta - -import ( - "time" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/spf13/cobra" - "go.etcd.io/bbolt" -) - -var ( - vAddress string - vPath string -) - -type epochState struct{} - -func (s epochState) CurrentEpoch() uint64 { - return 0 -} - -// Root contains `meta` command definition. -var Root = &cobra.Command{ - Use: "meta", - Short: "Operations with a metabase", -} - -func init() { - Root.AddCommand( - inspectCMD, - listGraveyardCMD, - listGarbageCMD, - tuiCMD, - ) -} - -func openMeta(cmd *cobra.Command) *meta.DB { - db := meta.New( - meta.WithPath(vPath), - meta.WithBoltDBOptions(&bbolt.Options{ - ReadOnly: true, - Timeout: 100 * time.Millisecond, - }), - meta.WithEpochState(epochState{}), - ) - common.ExitOnErr(cmd, common.Errf("could not open metabase: %w", db.Open(cmd.Context(), mode.ReadOnly))) - - return db -} diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go deleted file mode 100644 index 7b0e25f3d..000000000 --- a/cmd/frostfs-lens/internal/meta/tui.go +++ /dev/null @@ -1,118 +0,0 @@ -package meta - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" - "github.com/rivo/tview" - "github.com/spf13/cobra" - "go.etcd.io/bbolt" -) - -var tuiCMD = &cobra.Command{ - Use: "explore", - Short: "Metabase exploration with a terminal UI", - Long: `Launch a terminal UI to explore metabase and search for data. 
- -Available search filters: -- cid CID -- oid OID -- addr CID/OID -- attr key[/value] -`, - Run: tuiFunc, -} - -var initialPrompt string - -var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{ - 2: schema.MetabaseParserV2, - 3: schema.MetabaseParserV3, -} - -func init() { - common.AddComponentPathFlag(tuiCMD, &vPath) - - tuiCMD.Flags().StringVar( - &initialPrompt, - "filter", - "", - "Filter prompt to start with, format 'tag:value [+ tag:value]...'", - ) -} - -func tuiFunc(cmd *cobra.Command, _ []string) { - common.ExitOnErr(cmd, runTUI(cmd)) -} - -func runTUI(cmd *cobra.Command) error { - db, err := tui.OpenDB(vPath, false) - if err != nil { - return fmt.Errorf("couldn't open database: %w", err) - } - defer db.Close() - - schemaVersion, hasVersion := lookupSchemaVersion(cmd, db) - if !hasVersion { - return errors.New("couldn't detect schema version") - } - - metabaseParser, ok := parserPerSchemaVersion[schemaVersion] - if !ok { - return fmt.Errorf("unknown schema version %d", schemaVersion) - } - - // Needed if the app is stopped with Ctrl-C. - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() - - app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, metabaseParser, nil) - - _ = ui.AddFilter("cid", tui.CIDParser, "CID") - _ = ui.AddFilter("oid", tui.OIDParser, "OID") - _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") - _ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]") - - err = ui.WithPrompt(initialPrompt) - if err != nil { - return fmt.Errorf("invalid filter prompt: %w", err) - } - - app.SetRoot(ui, true).SetFocus(ui) - return app.Run() -} - -var ( - shardInfoBucket = []byte{5} - versionRecord = []byte("version") -) - -func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { - err := db.View(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(shardInfoBucket) - if bkt == nil { - return nil - } - rec := bkt.Get(versionRecord) - if rec == nil { - return nil - } - - version = binary.LittleEndian.Uint64(rec) - ok = true - - return nil - }) - if err != nil { - common.ExitOnErr(cmd, fmt.Errorf("couldn't look up version: %w", err)) - } - - return -}
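lookupSchemaVersion above captures the storage convention: the metabase keeps its schema version in the shard-info bucket (single-byte name {5}) under the key "version", encoded as a little-endian uint64. A self-contained sketch of the same read with bare bbolt; the database path is a placeholder to be supplied by the reader:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	// Placeholder path; point this at an actual metabase file.
	db, err := bbolt.Open("/path/to/metabase", 0o600, &bbolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var version uint64
	err = db.View(func(tx *bbolt.Tx) error {
		bkt := tx.Bucket([]byte{5}) // shard-info bucket
		if bkt == nil {
			return errors.New("no shard-info bucket")
		}
		rec := bkt.Get([]byte("version"))
		if len(rec) != 8 {
			return errors.New("missing or malformed version record")
		}
		version = binary.LittleEndian.Uint64(rec)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("schema version:", version)
}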
diff --git a/cmd/frostfs-lens/internal/printers.go b/cmd/frostfs-lens/internal/printers.go deleted file mode 100644 index ea0cbc8e0..000000000 --- a/cmd/frostfs-lens/internal/printers.go +++ /dev/null @@ -1,65 +0,0 @@ -package common - -import ( - "os" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -// PrintObjectHeader prints passed object's header fields via -// the passed cobra command. Does nothing with the payload. -func PrintObjectHeader(cmd *cobra.Command, h objectSDK.Object) { - cmd.Println("Version:", h.Version()) - cmd.Println("Type:", h.Type()) - printContainerID(cmd, h.ContainerID) - printObjectID(cmd, h.ID) - cmd.Println("Owner:", h.OwnerID()) - cmd.Println("CreatedAt:", h.CreationEpoch()) - cmd.Println("PayloadSize:", h.PayloadSize()) - cmd.Println("Attributes:") - for _, attr := range h.Attributes() { - cmd.Printf(" %s: %s\n", attr.Key(), attr.Value()) - } -} - -func printContainerID(cmd *cobra.Command, recv func() (cid.ID, bool)) { - var val string - - id, ok := recv() - if ok { - val = id.String() - } else { - val = "<empty>" - } - - cmd.Println("CID:", val) -} - -func printObjectID(cmd *cobra.Command, recv func() (oid.ID, bool)) { - var val string - - id, ok := recv() - if ok { - val = id.String() - } else { - val = "<empty>" - } - - cmd.Println("ID:", val) -} - -// WriteObjectToFile writes object to the provided path. Does nothing if -// the path is empty. -func WriteObjectToFile(cmd *cobra.Command, path string, data []byte) { - if path == "" { - return - } - - ExitOnErr(cmd, Errf("could not write file: %w", - os.WriteFile(path, data, 0o644))) - - cmd.Printf("\nSaved payload to '%s' file\n", path) -}
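The blobovnicza parsers in the next file expect data-bucket keys to be object addresses rendered as base58(CID) + "/" + base58(OID): RecordParser splits on the slash and base58-decodes each half before handing the raw bytes to the SDK ID types. A toy round trip of that key format, using zeroed 32-byte placeholders instead of real identifiers:

package main

import (
	"fmt"
	"strings"

	"github.com/mr-tron/base58"
)

func main() {
	// Placeholder 32-byte identifiers; real CIDs/OIDs are SHA-256-sized.
	rawCID := make([]byte, 32)
	rawOID := make([]byte, 32)
	rawOID[31] = 1

	// Encode: the address key as it would appear in a data bucket.
	key := base58.Encode(rawCID) + "/" + base58.Encode(rawOID)
	fmt.Println("key:", key)

	// Decode: the same steps RecordParser performs before cid.ID.Decode
	// and oid.ID.Decode take over.
	parts := strings.Split(key, "/")
	if len(parts) != 2 {
		panic("invalid key, expected address string <CID>/<OID>")
	}
	cnr, err := base58.Decode(parts[0])
	if err != nil {
		panic(err)
	}
	obj, err := base58.Decode(parts[1])
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded lengths:", len(cnr), len(obj))
}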
%w", err) - } - - var r Record - - r.addr.SetContainer(cnr) - r.addr.SetObject(obj) - - if err := r.object.Unmarshal(value); err != nil { - return nil, nil, errors.New("can't unmarshal object") - } - - return &r, nil, nil -} diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go deleted file mode 100644 index c7ed08cdd..000000000 --- a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go +++ /dev/null @@ -1,101 +0,0 @@ -package blobovnicza - -import ( - "fmt" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/davecgh/go-spew/spew" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type ( - MetaBucket struct{} - - MetaRecord struct { - label string - count uint64 - } - - Bucket struct { - size int64 - } - - Record struct { - addr oid.Address - object objectSDK.Object - } -) - -func (b *MetaBucket) String() string { - return common.FormatSimple("META", tcell.ColorLime) -} - -func (b *MetaBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *MetaBucket) Filter(string, any) common.FilterResult { - return common.No -} - -func (r *MetaRecord) String() string { - return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count) -} - -func (r *MetaRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *MetaRecord) Filter(string, any) common.FilterResult { - return common.No -} - -func (b *Bucket) String() string { - return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime) -} - -func (b *Bucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *Bucket) Filter(typ string, _ any) common.FilterResult { - switch typ { - case "cid": - return common.Maybe - case "oid": - return common.Maybe - default: - return common.No - } -} - -func (r *Record) String() string { - return fmt.Sprintf( - "CID %s OID %s %c Object {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), - tview.Borders.Vertical, - ) -} - -func (r *Record) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *Record) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) - default: - return common.No - } -} diff --git a/cmd/frostfs-lens/internal/schema/common/format.go b/cmd/frostfs-lens/internal/schema/common/format.go deleted file mode 100644 index 4ed7e96f2..000000000 --- a/cmd/frostfs-lens/internal/schema/common/format.go +++ /dev/null @@ -1,43 +0,0 @@ -package common - -import ( - "fmt" - "strconv" - - "github.com/gdamore/tcell/v2" -) - -type FormatOptions struct { - Color tcell.Color - - Bold, - Italic, - Underline, - StrikeThrough bool -} - -func Format(s string, opts FormatOptions) string { - var boldTag, italicTag, underlineTag, strikeThroughTag string - - switch { - case opts.Bold: - boldTag = "b" - case opts.Italic: - italicTag = "i" - case opts.Underline: - underlineTag = "u" - case opts.StrikeThrough: - strikeThroughTag = "s" - } - - attrs := fmt.Sprintf( - 
"%s%s%s%s", boldTag, italicTag, underlineTag, strikeThroughTag, - ) - color := strconv.FormatInt(int64(opts.Color.Hex()), 16) - - return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s) -} - -func FormatSimple(s string, c tcell.Color) string { - return Format(s, FormatOptions{Color: c}) -} diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go deleted file mode 100644 index 55051554c..000000000 --- a/cmd/frostfs-lens/internal/schema/common/raw.go +++ /dev/null @@ -1,31 +0,0 @@ -package common - -import ( - "github.com/davecgh/go-spew/spew" - "github.com/gdamore/tcell/v2" - "github.com/mr-tron/base58" -) - -type RawEntry struct { - // key and value used for record dump. - // nolint:unused - key, value []byte -} - -var RawParser Parser = rawParser - -func rawParser(key, value []byte) (SchemaEntry, Parser, error) { - return &RawEntry{key: key, value: value}, rawParser, nil -} - -func (r *RawEntry) String() string { - return FormatSimple(base58.Encode(r.key), tcell.ColorRed) -} - -func (r *RawEntry) DetailedString() string { - return spew.Sdump(r) -} - -func (r *RawEntry) Filter(string, any) FilterResult { - return No -} diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go deleted file mode 100644 index 077a68785..000000000 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ /dev/null @@ -1,79 +0,0 @@ -package common - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" -) - -type FilterResult byte - -const ( - No FilterResult = iota - Maybe - Yes -) - -func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult { - var res FilterResult - if condition { - res = onSuccess - } else { - res = onFailure - } - return res -} - -type SchemaEntry interface { - String() string - DetailedString() string - Filter(typ string, val any) FilterResult -} - -type ( - Parser func(key, value []byte) (SchemaEntry, Parser, error) - FallbackParser func(key, value []byte) (SchemaEntry, Parser) -) - -func Any(parsers ...Parser) Parser { - return func(key, value []byte) (SchemaEntry, Parser, error) { - var errs error - for _, parser := range parsers { - ret, next, err := parser(key, value) - if err == nil { - return ret, next, nil - } - errs = errors.Join(errs, err) - } - return nil, nil, fmt.Errorf("no parser succeeded: %w", errs) - } -} - -func WithFallback(parser Parser, fallback FallbackParser) Parser { - if parser == nil { - return fallback.ToParser() - } - return func(key, value []byte) (SchemaEntry, Parser, error) { - entry, next, err := parser(key, value) - if err == nil { - return entry, WithFallback(next, fallback), nil - } - return fallback.ToParser()(key, value) - } -} - -func (fp FallbackParser) ToParser() Parser { - return func(key, value []byte) (SchemaEntry, Parser, error) { - entry, next := fp(key, value) - return entry, next, nil - } -} - -func (p Parser) ToFallbackParser() FallbackParser { - return func(key, value []byte) (SchemaEntry, Parser) { - entry, next, err := p(key, value) - assert.NoError(err, "couldn't use that parser as a fallback parser") - return entry, next - } -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go deleted file mode 100644 index 6a08a723e..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go +++ /dev/null @@ -1,29 +0,0 @@ -package buckets - -import ( - 
"github.com/davecgh/go-spew/spew" -) - -func (b *PrefixBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *PrefixContainerBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *UserBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *ContainerBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *UserAttributeKeyBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (b *UserAttributeValueBucket) DetailedString() string { - return spew.Sdump(*b) -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go deleted file mode 100644 index 891c4004f..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go +++ /dev/null @@ -1,81 +0,0 @@ -package buckets - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult { - switch typ { - case "cid": - return b.resolvers.cidResolver(false) - case "oid": - return b.resolvers.oidResolver(false) - default: - return common.No - } -} - -func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return b.resolvers.cidResolver(b.id.Equals(id)) - case "oid": - return b.resolvers.oidResolver(false) - default: - return common.No - } -} - -func (b *UserBucket) Filter(typ string, _ any) common.FilterResult { - switch typ { - case "cid": - return b.resolvers.cidResolver(false) - case "oid": - return b.resolvers.oidResolver(false) - default: - return common.No - } -} - -func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return b.resolvers.cidResolver(b.id.Equals(id)) - case "oid": - return b.resolvers.oidResolver(false) - default: - return common.No - } -} - -func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(b.id.Equals(id), common.Yes, common.No) - case "oid": - return common.Maybe - case "key": - key := val.(string) - return common.IfThenElse(b.key == key, common.Yes, common.No) - case "value": - return common.Maybe - default: - return common.No - } -} - -func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - return common.Maybe - case "value": - value := val.(string) - return common.IfThenElse(b.value == value, common.Yes, common.No) - default: - return common.No - } -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go deleted file mode 100644 index 4e6bbf08a..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go +++ /dev/null @@ -1,126 +0,0 @@ -package buckets - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records" -) - -var ( - GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: LenientResolver, - }) - - GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: LenientResolver, - }) - - ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, 
records.ContainerVolumeRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: StrictResolver, - }) - - LockedParser = NewPrefixBucketParser( - Locked, - NewContainerBucketParser( - records.LockedRecordParser, - Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }, - ), - Resolvers{ - cidResolver: LenientResolver, - oidResolver: LenientResolver, - }, - ) - - ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: StrictResolver, - }) - - PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - LockersParser = NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - OwnerParser = NewPrefixContainerBucketParser( - Owner, - NewUserBucketParser( - records.OwnerRecordParser, - Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }, - ), - Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }, - ) - - UserAttributeParserV2 = NewUserAttributeKeyBucketParser( - NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), - ) - - UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( - NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), - []string{"FilePath", "S3-Access-Box-CRDT-Name"}, - ) - - PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: StrictResolver, - }) - - ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: StrictResolver, - }) - - ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: StrictResolver, - }) - - ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) - - ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: LenientResolver, - }) - - ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) -) diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go deleted file mode 100644 index 42a24c594..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go +++ 
/dev/null @@ -1,57 +0,0 @@ -package buckets - -type Prefix byte - -const ( - Graveyard Prefix = iota - Garbage - ToMoveIt - ContainerVolume - Locked - ShardInfo - Primary - Lockers - _ - Tombstone - Small - Root - Owner - UserAttribute - PayloadHash - Parent - Split - ContainerCounters - ECInfo - ExpirationEpochToObject - ObjectToExpirationEpoch -) - -var x = map[Prefix]string{ - Graveyard: "Graveyard", - Garbage: "Garbage", - ToMoveIt: "To Move It", - ContainerVolume: "Container Volume", - Locked: "Locked", - ShardInfo: "Shard Info", - Primary: "Primary", - Lockers: "Lockers", - Tombstone: "Tombstone", - Small: "Small", - Root: "Root", - Owner: "Owner", - UserAttribute: "User Attribute", - PayloadHash: "Payload Hash", - Parent: "Parent", - Split: "Split", - ContainerCounters: "Container Counters", - ECInfo: "EC Info", - ExpirationEpochToObject: "Exp. Epoch to Object", - ObjectToExpirationEpoch: "Object to Exp. Epoch", -} - -func (p Prefix) String() string { - if s, ok := x[p]; ok { - return s - } - return "Unknown Prefix" -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go deleted file mode 100644 index 62d126f88..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go +++ /dev/null @@ -1,48 +0,0 @@ -package buckets - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "github.com/gdamore/tcell/v2" -) - -func (b *PrefixBucket) String() string { - return common.FormatSimple( - fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, - ) -} - -func (b *PrefixContainerBucket) String() string { - return fmt.Sprintf( - "%s CID %s", - common.FormatSimple( - fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, - ), - common.FormatSimple(b.id.String(), tcell.ColorAqua), - ) -} - -func (b *UserBucket) String() string { - return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua) -} - -func (b *ContainerBucket) String() string { - return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua) -} - -func (b *UserAttributeKeyBucket) String() string { - return fmt.Sprintf("%s CID %s ATTR-KEY %s", - common.FormatSimple( - fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, - ), - common.FormatSimple( - fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, - ), - common.FormatSimple(b.key, tcell.ColorAqua), - ) -} - -func (b *UserAttributeValueBucket) String() string { - return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua) -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go deleted file mode 100644 index 7355c3d9e..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go +++ /dev/null @@ -1,177 +0,0 @@ -package buckets - -import ( - "errors" - "slices" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/mr-tron/base58" -) - -type ( - PrefixBucket struct { - prefix Prefix - resolvers Resolvers - } - - PrefixContainerBucket struct { - prefix Prefix - id cid.ID - resolvers Resolvers - } - - ContainerBucket struct { - id cid.ID - resolvers Resolvers - } - - UserBucket struct { - id user.ID - resolvers Resolvers - } - - UserAttributeKeyBucket struct { - prefix Prefix - id cid.ID - key string - } - - UserAttributeValueBucket 
struct { - value string - } -) - -type ( - FilterResolver = func(result bool) common.FilterResult - - Resolvers struct { - cidResolver FilterResolver - oidResolver FilterResolver - } -) - -var ( - StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) } - LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) } -) - -var ( - ErrNotBucket = errors.New("not a bucket") - ErrInvalidKeyLength = errors.New("invalid key length") - ErrInvalidValueLength = errors.New("invalid value length") - ErrInvalidPrefix = errors.New("invalid prefix") - ErrUnexpectedAttributeKey = errors.New("unexpected attribute key") -) - -func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, ErrNotBucket - } - if len(key) != 1 { - return nil, nil, ErrInvalidKeyLength - } - var b PrefixBucket - if b.prefix = Prefix(key[0]); b.prefix != prefix { - return nil, nil, ErrInvalidPrefix - } - b.resolvers = resolvers - return &b, next, nil - } -} - -func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, ErrNotBucket - } - if len(key) != 33 { - return nil, nil, ErrInvalidKeyLength - } - var b PrefixContainerBucket - if b.prefix = Prefix(key[0]); b.prefix != prefix { - return nil, nil, ErrInvalidPrefix - } - if err := b.id.Decode(key[1:]); err != nil { - return nil, nil, err - } - b.resolvers = resolvers - return &b, next, nil - } -} - -func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, ErrNotBucket - } - var b UserBucket - if err := b.id.DecodeString(base58.Encode(key)); err != nil { - return nil, nil, err - } - b.resolvers = resolvers - return &b, next, nil - } -} - -func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, ErrNotBucket - } - if len(key) != 32 { - return nil, nil, ErrInvalidKeyLength - } - var b ContainerBucket - if err := b.id.Decode(key); err != nil { - return nil, nil, err - } - b.resolvers = resolvers - return &b, next, nil - } -} - -func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { - return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil) -} - -func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, ErrNotBucket - } - if len(key) < 34 { - return nil, nil, ErrInvalidKeyLength - } - var b UserAttributeKeyBucket - if b.prefix = Prefix(key[0]); b.prefix != UserAttribute { - return nil, nil, ErrInvalidPrefix - } - if err := b.id.Decode(key[1:33]); err != nil { - return nil, nil, err - } - b.key = string(key[33:]) - - if len(keys) != 0 && !slices.Contains(keys, b.key) { - return nil, nil, ErrUnexpectedAttributeKey - } - - return &b, next, nil - } -} - -func NewUserAttributeValueBucketParser(next common.Parser) common.Parser { - return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if 
value != nil { - return nil, nil, ErrNotBucket - } - if len(key) == 0 { - return nil, nil, ErrInvalidKeyLength - } - var b UserAttributeValueBucket - b.value = string(key) - return &b, next, nil - } -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go deleted file mode 100644 index 4cc9e8765..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/parser.go +++ /dev/null @@ -1,52 +0,0 @@ -package metabase - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" -) - -var MetabaseParserV3 = common.WithFallback( - common.Any( - buckets.GraveyardParser, - buckets.GarbageParser, - buckets.ContainerVolumeParser, - buckets.LockedParser, - buckets.ShardInfoParser, - buckets.PrimaryParser, - buckets.LockersParser, - buckets.TombstoneParser, - buckets.SmallParser, - buckets.RootParser, - buckets.UserAttributeParserV3, - buckets.ParentParser, - buckets.SplitParser, - buckets.ContainerCountersParser, - buckets.ECInfoParser, - buckets.ExpirationEpochToObjectParser, - buckets.ObjectToExpirationEpochParser, - ), - common.RawParser.ToFallbackParser(), -) - -var MetabaseParserV2 = common.WithFallback( - common.Any( - buckets.GraveyardParser, - buckets.GarbageParser, - buckets.ContainerVolumeParser, - buckets.LockedParser, - buckets.ShardInfoParser, - buckets.PrimaryParser, - buckets.LockersParser, - buckets.TombstoneParser, - buckets.SmallParser, - buckets.RootParser, - buckets.OwnerParser, - buckets.UserAttributeParserV2, - buckets.PayloadHashParser, - buckets.ParentParser, - buckets.SplitParser, - buckets.ContainerCountersParser, - buckets.ECInfoParser, - ), - common.RawParser.ToFallbackParser(), -) diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go deleted file mode 100644 index 477c4fc9d..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go +++ /dev/null @@ -1,73 +0,0 @@ -package records - -import ( - "github.com/davecgh/go-spew/spew" -) - -func (r *GraveyardRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *GarbageRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ContainerVolumeRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *LockedRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ShardInfoRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ObjectRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *SmallRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *RootRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *OwnerRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *UserAttributeRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *PayloadHashRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ParentRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *SplitRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ContainerCountersRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ECInfoRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ExpirationEpochToObjectRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (r *ObjectToExpirationEpochRecord) 
DetailedString() string { - return spew.Sdump(*r) -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go deleted file mode 100644 index e038911d7..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go +++ /dev/null @@ -1,168 +0,0 @@ -package records - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.object.Container().Equals(id), common.Yes, common.No) - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ShardInfoRecord) Filter(string, any) common.FilterResult { - return common.No -} - -func (r *LockedRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *SmallRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *RootRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *PayloadHashRecord) Filter(string, any) common.FilterResult { - return common.No -} - -func (r *ParentRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *SplitRecord) Filter(string, any) common.FilterResult { - return common.No -} - -func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case 
"cid": - id := val.(cid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.id.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No) - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) - default: - return common.No - } -} - -func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) - default: - return common.No - } -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go deleted file mode 100644 index 5d846cb75..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go +++ /dev/null @@ -1,293 +0,0 @@ -package records - -import ( - "encoding/binary" - "errors" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -var ( - ErrInvalidKeyLength = errors.New("invalid key length") - ErrInvalidValueLength = errors.New("invalid value length") - ErrInvalidPrefix = errors.New("invalid prefix") -) - -func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 64 { - return nil, nil, ErrInvalidKeyLength - } - if len(value) != 64 { - return nil, nil, ErrInvalidValueLength - } - var ( - cnr cid.ID - obj oid.ID - r GraveyardRecord - ) - - _ = cnr.Decode(key[:32]) - _ = obj.Decode(key[32:]) - - r.object.SetContainer(cnr) - r.object.SetObject(obj) - - _ = cnr.Decode(value[:32]) - _ = obj.Decode(value[32:]) - - r.tombstone.SetContainer(cnr) - r.tombstone.SetObject(obj) - - return &r, nil, nil -} - -func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 64 { - return nil, nil, ErrInvalidKeyLength - } - var ( - cnr cid.ID - obj oid.ID - r GarbageRecord - ) - - _ = cnr.Decode(key[:32]) - _ = obj.Decode(key[32:]) - - r.addr.SetContainer(cnr) - r.addr.SetObject(obj) - - return &r, nil, nil -} - -func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 32 { - return nil, nil, ErrInvalidKeyLength - } - if len(value) != 8 { - return nil, nil, ErrInvalidValueLength - } - var r ContainerVolumeRecord - - _ = r.id.Decode(key) - r.volume = binary.LittleEndian.Uint64(value) - - return &r, nil, nil -} - -func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var ( - r LockedRecord - err error - ) - - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - if r.ids, err = DecodeOIDs(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) == 0 { - return 
nil, nil, ErrInvalidKeyLength - } - - var r ShardInfoRecord - if string(key) == "id" { - r.label = string(key) - r.value = shard.ID(value).String() - - return &r, nil, nil - } - - if len(value) != 8 { - return nil, nil, ErrInvalidValueLength - } - r.label = string(key) - r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10) - - return &r, nil, nil -} - -func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 32 { - return nil, nil, ErrInvalidKeyLength - } - var r ObjectRecord - - _ = r.id.Decode(key) - if err := r.object.Unmarshal(value); err != nil { - return nil, nil, err - } - - return &r, nil, nil -} - -func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var r SmallRecord - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - if len(value) != 0 { - x := string(value) - r.storageID = &x - } - return &r, nil, nil -} - -func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var r RootRecord - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - if len(value) == 0 { - return &r, nil, nil - } - r.info = &objectSDK.SplitInfo{} - if err := r.info.Unmarshal(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { - var r OwnerRecord - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { - var r UserAttributeRecord - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 32 { - return nil, nil, ErrInvalidKeyLength - } - var ( - err error - r PayloadHashRecord - ) - - r.checksum.SetSHA256([32]byte(key)) - if r.ids, err = DecodeOIDs(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var ( - r ParentRecord - err error - ) - if err = r.parent.Decode(key); err != nil { - return nil, nil, err - } - if r.ids, err = DecodeOIDs(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var ( - err error - r SplitRecord - ) - if err = r.id.UnmarshalBinary(key); err != nil { - return nil, nil, err - } - if r.ids, err = DecodeOIDs(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(value) != 24 { - return nil, nil, ErrInvalidValueLength - } - - var r ContainerCountersRecord - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - - r.logical = binary.LittleEndian.Uint64(value[:8]) - r.physical = binary.LittleEndian.Uint64(value[8:16]) - r.user = binary.LittleEndian.Uint64(value[16:24]) - - return &r, nil, nil -} - -func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - var ( - r ECInfoRecord - err error - ) - - if err := r.id.Decode(key); err != nil { - return nil, nil, err - } - if r.ids, err = DecodeOIDs(value); err != nil { - return nil, nil, err - } - return &r, nil, nil -} - -func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, 
common.Parser, error) { - if len(key) != 72 { - return nil, nil, ErrInvalidKeyLength - } - - var ( - r ExpirationEpochToObjectRecord - err error - ) - - r.epoch = binary.BigEndian.Uint64(key[:8]) - if err = r.cnt.Decode(key[8:40]); err != nil { - return nil, nil, err - } - if err = r.obj.Decode(key[40:]); err != nil { - return nil, nil, err - } - - return &r, nil, nil -} - -func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if len(key) != 32 { - return nil, nil, ErrInvalidKeyLength - } - if len(value) != 8 { - return nil, nil, ErrInvalidValueLength - } - - var ( - r ObjectToExpirationEpochRecord - err error - ) - - if err = r.obj.Decode(key); err != nil { - return nil, nil, err - } - r.epoch = binary.LittleEndian.Uint64(value) - - return &r, nil, nil -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go deleted file mode 100644 index f71244625..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go +++ /dev/null @@ -1,155 +0,0 @@ -package records - -import ( - "fmt" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -func (r *GraveyardRecord) String() string { - return fmt.Sprintf( - "Object CID %s OID %s %c Tombstone CID %s OID %s", - common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua), - tview.Borders.Vertical, - common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua), - ) -} - -func (r *GarbageRecord) String() string { - return fmt.Sprintf( - "CID %-44s OID %-44s", - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), - ) -} - -func (r *ContainerVolumeRecord) String() string { - return fmt.Sprintf( - "CID %-44s %c %d", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - r.volume, - ) -} - -func (r *LockedRecord) String() string { - return fmt.Sprintf( - "Object OID %s %c Lockers [%d]OID {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - len(r.ids), - ) -} - -func (r *ShardInfoRecord) String() string { - return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value) -} - -func (r *ObjectRecord) String() string { - return fmt.Sprintf( - "OID %s %c Object {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - ) -} - -func (r *SmallRecord) String() string { - s := fmt.Sprintf( - "OID %s %c", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - ) - if r.storageID != nil { - s = fmt.Sprintf("%s %s", s, *r.storageID) - } - return s -} - -func (r *RootRecord) String() string { - s := fmt.Sprintf( - "Root OID %s %c", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - ) - if r.info != nil { - s += " Split info {...}" - } - return s -} - -func (r *OwnerRecord) String() string { - return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua) -} - -func (r *UserAttributeRecord) String() string { - return "OID " + common.FormatSimple(r.id.String(), 
tcell.ColorAqua) -} - -func (r *PayloadHashRecord) String() string { - return fmt.Sprintf( - "Checksum %s %c [%d]OID {...}", - common.FormatSimple(r.checksum.String(), tcell.ColorAqua), - tview.Borders.Vertical, - len(r.ids), - ) -} - -func (r *ParentRecord) String() string { - return fmt.Sprintf( - "Parent OID %s %c [%d]OID {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua), - tview.Borders.Vertical, - len(r.ids), - ) -} - -func (r *SplitRecord) String() string { - return fmt.Sprintf( - "Split ID %s %c [%d]OID {...}", - common.FormatSimple(r.id.String(), tcell.ColorAqua), - tview.Borders.Vertical, - len(r.ids), - ) -} - -func (r *ContainerCountersRecord) String() string { - return fmt.Sprintf( - "CID %s %c logical %d, physical %d, user %d", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - r.logical, r.physical, r.user, - ) -} - -func (r *ECInfoRecord) String() string { - return fmt.Sprintf( - "OID %s %c [%d]OID {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), - tview.Borders.Vertical, - len(r.ids), - ) -} - -func (r *ExpirationEpochToObjectRecord) String() string { - return fmt.Sprintf( - "exp. epoch %s %c CID %s OID %s", - common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua), - tview.Borders.Vertical, - common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), - ) -} - -func (r *ObjectToExpirationEpochRecord) String() string { - return fmt.Sprintf( - "OID %s %c exp. epoch %s", - common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), - tview.Borders.Vertical, - common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), - ) -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go deleted file mode 100644 index 0809cad1a..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go +++ /dev/null @@ -1,93 +0,0 @@ -package records - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/google/uuid" -) - -type ( - GraveyardRecord struct { - object, tombstone oid.Address - } - - GarbageRecord struct { - addr oid.Address - } - - ContainerVolumeRecord struct { - id cid.ID - volume uint64 - } - - LockedRecord struct { - id oid.ID - ids []oid.ID - } - - ShardInfoRecord struct { - label string - value string - } - - ObjectRecord struct { - id oid.ID - object objectSDK.Object - } - - SmallRecord struct { - id oid.ID - storageID *string // optional - } - - RootRecord struct { - id oid.ID - info *objectSDK.SplitInfo // optional - } - - OwnerRecord struct { - id oid.ID - } - - UserAttributeRecord struct { - id oid.ID - } - - PayloadHashRecord struct { - checksum checksum.Checksum - ids []oid.ID - } - - ParentRecord struct { - parent oid.ID - ids []oid.ID - } - - SplitRecord struct { - id uuid.UUID - ids []oid.ID - } - - ContainerCountersRecord struct { - id cid.ID - logical, physical, user uint64 - } - - ECInfoRecord struct { - id oid.ID - ids []oid.ID - } - - ExpirationEpochToObjectRecord struct { - epoch uint64 - cnt cid.ID - obj oid.ID - } - - ObjectToExpirationEpochRecord struct { - obj oid.ID - epoch uint64 - } -) diff --git 
a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go deleted file mode 100644 index d15d69146..000000000 --- a/cmd/frostfs-lens/internal/schema/metabase/records/util.go +++ /dev/null @@ -1,20 +0,0 @@ -package records - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/nspcc-dev/neo-go/pkg/io" -) - -func DecodeOIDs(data []byte) ([]oid.ID, error) { - r := io.NewBinReaderFromBuf(data) - - size := r.ReadVarUint() - oids := make([]oid.ID, size) - - for i := range size { - if err := oids[i].Decode(r.ReadVarBytes()); err != nil { - return nil, err - } - } - return oids, nil -} diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go deleted file mode 100644 index 3bfe2608b..000000000 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ /dev/null @@ -1,63 +0,0 @@ -package writecache - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" -) - -var WritecacheParser = common.WithFallback( - DefaultBucketParser, - common.RawParser.ToFallbackParser(), -) - -func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - if value != nil { - return nil, nil, errors.New("not a bucket") - } - if !bytes.Equal(key, []byte{0}) { - return nil, nil, errors.New("invalid key") - } - return &DefaultBucket{}, DefaultRecordParser, nil -} - -func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { - parts := strings.Split(string(key), "/") - - if len(parts) != 2 { - return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>") - } - - cnrRaw, err := base58.Decode(parts[0]) - if err != nil { - return nil, nil, errors.New("can't decode CID string") - } - objRaw, err := base58.Decode(parts[1]) - if err != nil { - return nil, nil, errors.New("can't decode OID string") - } - - cnr := cid.ID{} - if err := cnr.Decode(cnrRaw); err != nil { - return nil, nil, fmt.Errorf("can't decode CID: %w", err) - } - obj := oid.ID{} - if err := obj.Decode(objRaw); err != nil { - return nil, nil, fmt.Errorf("can't decode OID: %w", err) - } - - var r DefaultRecord - - r.addr.SetContainer(cnr) - r.addr.SetObject(obj) - - r.data = value - - return &r, nil, nil -} diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go deleted file mode 100644 index 11e6f3fcd..000000000 --- a/cmd/frostfs-lens/internal/schema/writecache/types.go +++ /dev/null @@ -1,68 +0,0 @@ -package writecache - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/davecgh/go-spew/spew" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type ( - DefaultBucket struct{} - - DefaultRecord struct { - addr oid.Address - // data used for record dump. 
- // nolint:unused - data []byte - } -) - -func (b *DefaultBucket) String() string { - return common.FormatSimple("0 Default", tcell.ColorLime) -} - -func (r *DefaultRecord) String() string { - return fmt.Sprintf( - "CID %s OID %s %c Data {...}", - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua), - common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua), - tview.Borders.Vertical, - ) -} - -func (b *DefaultBucket) DetailedString() string { - return spew.Sdump(*b) -} - -func (r *DefaultRecord) DetailedString() string { - return spew.Sdump(*r) -} - -func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult { - switch typ { - case "cid": - return common.Maybe - case "oid": - return common.Maybe - default: - return common.No - } -} - -func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult { - switch typ { - case "cid": - id := val.(cid.ID) - return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No) - case "oid": - id := val.(oid.ID) - return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No) - default: - return common.No - } -} diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go deleted file mode 100644 index 2d3b20792..000000000 --- a/cmd/frostfs-lens/internal/tui/buckets.go +++ /dev/null @@ -1,251 +0,0 @@ -package tui - -import ( - "context" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type BucketsView struct { - *tview.Box - - mu sync.Mutex - - view *tview.TreeView - nodeToUpdate *tview.TreeNode - - ui *UI - filter *Filter -} - -type bucketNode struct { - bucket *Bucket - filter *Filter -} - -func NewBucketsView(ui *UI, filter *Filter) *BucketsView { - return &BucketsView{ - Box: tview.NewBox(), - view: tview.NewTreeView(), - ui: ui, - filter: filter, - } -} - -func (v *BucketsView) Mount(_ context.Context) error { - root := tview.NewTreeNode(".") - root.SetExpanded(false) - root.SetSelectable(false) - root.SetReference(&bucketNode{ - bucket: &Bucket{NextParser: v.ui.rootParser}, - filter: v.filter, - }) - - v.nodeToUpdate = root - - v.view.SetRoot(root) - v.view.SetCurrentNode(root) - - return nil -} - -func (v *BucketsView) Update(ctx context.Context) error { - if v.nodeToUpdate == nil { - return nil - } - defer func() { v.nodeToUpdate = nil }() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - ready := make(chan struct{}) - errCh := make(chan error) - - tmp := tview.NewTreeNode(v.nodeToUpdate.GetText()) - tmp.SetReference(v.nodeToUpdate.GetReference()) - - node := v.nodeToUpdate.GetReference().(*bucketNode) - - go func() { - defer close(ready) - - hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path) - if err != nil { - errCh <- err - } - - // Show the selected bucket's records instead. 
- if !hasBuckets && node.bucket.NextParser != nil { - v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter)) - } - - if v.nodeToUpdate.IsExpanded() { - return - } - - err = v.loadNodeChildren(ctx, tmp, node.filter) - if err != nil { - errCh <- err - } - }() - - select { - case <-ctx.Done(): - case <-ready: - v.mu.Lock() - v.nodeToUpdate.SetChildren(tmp.GetChildren()) - v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded()) - v.mu.Unlock() - case err := <-errCh: - return err - } - - return nil -} - -func (v *BucketsView) Unmount() { -} - -func (v *BucketsView) Draw(screen tcell.Screen) { - x, y, width, height := v.GetInnerRect() - v.view.SetRect(x, y, width, height) - - v.view.Draw(screen) -} - -func (v *BucketsView) loadNodeChildren( - ctx context.Context, node *tview.TreeNode, filter *Filter, -) error { - parentBucket := node.GetReference().(*bucketNode).bucket - - path := parentBucket.Path - parser := parentBucket.NextParser - - buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) - - for item := range buffer { - if item.err != nil { - return item.err - } - bucket := item.val - - var err error - bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) - if err != nil { - return err - } - - satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter) - if err != nil { - return err - } - if !satisfies { - continue - } - - child := tview.NewTreeNode(bucket.Entry.String()). - SetSelectable(true). - SetExpanded(false). - SetReference(&bucketNode{ - bucket: bucket, - filter: filter.Apply(bucket.Entry), - }) - - node.AddChild(child) - } - - return nil -} - -func (v *BucketsView) bucketSatisfiesFilter( - ctx context.Context, bucket *Bucket, filter *Filter, -) (bool, error) { - // Does the current bucket itself satisfy the filter? - filter = filter.Apply(bucket.Entry) - - if filter.Result() == common.Yes { - return true, nil - } - - if filter.Result() == common.No { - return false, nil - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Check the current bucket's nested buckets, if any exist. - bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - - for item := range bucketsBuffer { - if item.err != nil { - return false, item.err - } - b := item.val - - var err error - b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) - if err != nil { - return false, err - } - - satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter) - if err != nil { - return false, err - } - if satisfies { - return true, nil - } - } - - // Check the current bucket's nested records, if any exist. - recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - - for item := range recordsBuffer { - if item.err != nil { - return false, item.err - } - r := item.val - - var err error - r.Entry, _, err = bucket.NextParser(r.Key, r.Value) - if err != nil { - return false, err - } - - if filter.Apply(r.Entry).Result() == common.Yes { - return true, nil - } - } - - return false, nil -} - -func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { - return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { - currentNode := v.view.GetCurrentNode() - if currentNode == nil { - return - } - - switch event.Key() { - case tcell.KeyEnter: - // Expand or collapse the selected bucket's nested buckets, - // otherwise, navigate to that bucket's records. - v.nodeToUpdate = currentNode - case tcell.KeyCtrlR: - // Navigate to the selected bucket's records.
- bucketNode := currentNode.GetReference().(*bucketNode) - v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter)) - case tcell.KeyCtrlD: - // Navigate to the selected bucket's detailed view. - bucketNode := currentNode.GetReference().(*bucketNode) - v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString())) - default: - v.view.InputHandler()(event, func(tview.Primitive) {}) - } - }) -} diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go deleted file mode 100644 index 94fa87f98..000000000 --- a/cmd/frostfs-lens/internal/tui/db.go +++ /dev/null @@ -1,151 +0,0 @@ -package tui - -import ( - "context" - "errors" - "fmt" - - "go.etcd.io/bbolt" -) - -type Item[T any] struct { - val T - err error -} - -func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) { - if len(path) == 0 { - return nil, errors.New("can't find bucket without path") - } - - name := path[0] - bucket := tx.Bucket(name) - if bucket == nil { - return nil, fmt.Errorf("no bucket with name %s", name) - } - for _, name := range path[1:] { - bucket = bucket.Bucket(name) - if bucket == nil { - return nil, fmt.Errorf("no bucket with name %s", name) - } - } - return bucket, nil -} - -func load[T any]( - ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, - filter func(key, value []byte) bool, transform func(key, value []byte) T, -) <-chan Item[T] { - buffer := make(chan Item[T], bufferSize) - - go func() { - defer close(buffer) - - err := db.View(func(tx *bbolt.Tx) error { - var cursor *bbolt.Cursor - if len(path) == 0 { - cursor = tx.Cursor() - } else { - bucket, err := resolvePath(tx, path) - if err != nil { - buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)} - return nil - } - cursor = bucket.Cursor() - } - - key, value := cursor.First() - for { - if key == nil { - return nil - } - if filter != nil && !filter(key, value) { - key, value = cursor.Next() - continue - } - - select { - case <-ctx.Done(): - return nil - case buffer <- Item[T]{val: transform(key, value)}: - key, value = cursor.Next() - } - } - }) - if err != nil { - buffer <- Item[T]{err: err} - } - }() - - return buffer -} - -func LoadBuckets( - ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) <-chan Item[*Bucket] { - buffer := load( - ctx, db, path, bufferSize, - func(_, value []byte) bool { - return value == nil - }, - func(key, _ []byte) *Bucket { - base := make([][]byte, 0, len(path)) - base = append(base, path...) - - return &Bucket{ - Name: key, - Path: append(base, key), - } - }, - ) - - return buffer -} - -func LoadRecords( - ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) <-chan Item[*Record] { - buffer := load( - ctx, db, path, bufferSize, - func(_, value []byte) bool { - return value != nil - }, - func(key, value []byte) *Record { - base := make([][]byte, 0, len(path)) - base = append(base, path...) - - return &Record{ - Key: key, - Value: value, - Path: append(base, key), - } - }, - ) - - return buffer -} - -// HasBuckets checks if a bucket has nested buckets. It relies on the assumption -// that a bucket can have either nested buckets or records but not both.
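Given that invariant, a single probe is enough to classify a bucket: bbolt cursors report a nested bucket as an entry with a nil value and a plain record as an entry with a non-nil one, which is what the load helper above relies on. A minimal sketch of the same check written against the bbolt API directly (illustrative only; db and name are assumed to be in scope):

	// Sketch: classify a bucket by its first cursor entry.
	_ = db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(name) // name is []byte; nil if the bucket is absent
		if b == nil {
			return nil
		}
		k, v := b.Cursor().First()
		switch {
		case k == nil: // empty bucket: neither nested buckets nor records
		case v == nil: // first entry is a nested bucket
		default: // first entry is a record
		}
		return nil
	})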
-func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - buffer := load( - ctx, db, path, 1, - nil, - func(_, value []byte) []byte { return value }, - ) - - x, ok := <-buffer - if !ok { - return false, nil - } - if x.err != nil { - return false, x.err - } - if x.val != nil { - return false, nil - } - return true, nil -} diff --git a/cmd/frostfs-lens/internal/tui/detailed.go b/cmd/frostfs-lens/internal/tui/detailed.go deleted file mode 100644 index b2d897230..000000000 --- a/cmd/frostfs-lens/internal/tui/detailed.go +++ /dev/null @@ -1,24 +0,0 @@ -package tui - -import ( - "context" - - "github.com/rivo/tview" -) - -type DetailedView struct { - *tview.TextView -} - -func NewDetailedView(detailed string) *DetailedView { - v := &DetailedView{ - TextView: tview.NewTextView(), - } - v.SetDynamicColors(true) - v.SetText(detailed) - return v -} - -func (v *DetailedView) Mount(_ context.Context) error { return nil } -func (v *DetailedView) Update(_ context.Context) error { return nil } -func (v *DetailedView) Unmount() {} diff --git a/cmd/frostfs-lens/internal/tui/filter.go b/cmd/frostfs-lens/internal/tui/filter.go deleted file mode 100644 index e7879eca7..000000000 --- a/cmd/frostfs-lens/internal/tui/filter.go +++ /dev/null @@ -1,44 +0,0 @@ -package tui - -import ( - "maps" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" -) - -type Filter struct { - values map[string]any - results map[string]common.FilterResult -} - -func NewFilter(values map[string]any) *Filter { - f := &Filter{ - values: maps.Clone(values), - results: make(map[string]common.FilterResult), - } - for tag := range values { - f.results[tag] = common.No - } - return f -} - -func (f *Filter) Apply(e common.SchemaEntry) *Filter { - filter := &Filter{ - values: f.values, - results: maps.Clone(f.results), - } - - for tag, value := range filter.values { - filter.results[tag] = max(filter.results[tag], e.Filter(tag, value)) - } - - return filter -} - -func (f *Filter) Result() common.FilterResult { - current := common.Yes - for _, r := range f.results { - current = min(r, current) - } - return current -} diff --git a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt deleted file mode 100644 index c371b34e9..000000000 --- a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt +++ /dev/null @@ -1,38 +0,0 @@ -[green::b]HOTKEYS[-::-] - - [green::b]Navigation[-::-] - - [yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-] - Scroll down. - - [yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-] - Scroll up. - - [yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-] - Scroll down by a full page. - - [yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-] - Scroll up by a full page. - - [green::b]Actions[-::-] - - [yellow::b]Enter[-::-] - Perform actions based on the current context: - - In Buckets View: - - Expand/collapse the selected bucket to show/hide its nested buckets. - - If no nested buckets exist, navigate to the selected bucket's records. - - In Records View: Open the detailed view of the selected record. - - [yellow::b]Escape[-::-] - Return to the previous page, opposite of [yellow::b]Enter[-::-]. - - Refer to the [green::b]SEARCHING[-::-] section for more specific actions. - - - [green::b]Alternative Action Hotkeys[-::-] - - [yellow::b]Ctrl-r[-::-] - Directly navigate to the selected bucket's records. 
- - [yellow::b]Ctrl-d[-::-] - Access the detailed view of the selected bucket. diff --git a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt deleted file mode 100644 index bc2be512b..000000000 --- a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt +++ /dev/null @@ -1,26 +0,0 @@ -[green::b]SEARCHING[-::-] - - [green::b]Hotkeys[-::-] - - [yellow::b]/[-::-] - Initiate the search prompt. - - The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-] - - Multiple filters can be combined with [yellow::b]+[-::-]; the result is the intersection of those filters' result sets. - - Any leading and trailing whitespace will be ignored. - - An empty prompt will return all results with no filters applied. - - Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags. - - [yellow::b]Enter[-::-] - Execute the search based on the entered prompt. - - If the prompt is invalid, an error message will be displayed. - - [yellow::b]Escape[-::-] - Exit the search prompt without performing a search. - - [yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-] - Scroll through the search history. - - - [green::b]Available Search Filters[-::-] - -%s diff --git a/cmd/frostfs-lens/internal/tui/help.go b/cmd/frostfs-lens/internal/tui/help.go deleted file mode 100644 index 3ab8fede0..000000000 --- a/cmd/frostfs-lens/internal/tui/help.go +++ /dev/null @@ -1,101 +0,0 @@ -package tui - -import ( - _ "embed" - "fmt" - "strings" - - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -var ( - //go:embed help-pages/hotkeys.txt - hotkeysHelpText string - - //go:embed help-pages/searching.txt - searchingHelpText string -) - -type HelpPage struct { - *tview.Box - pages []*tview.TextView - currentPage int - - filters []string - filterHints map[string]string -} - -func NewHelpPage(filters []string, hints map[string]string) *HelpPage { - hp := &HelpPage{ - Box: tview.NewBox(), - filters: filters, - filterHints: hints, - } - - page := tview.NewTextView(). - SetDynamicColors(true). - SetText(hotkeysHelpText) - hp.addPage(page) - - page = tview.NewTextView(). - SetDynamicColors(true).
- SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText())) - hp.addPage(page) - - return hp -} - -func (hp *HelpPage) addPage(page *tview.TextView) { - hp.pages = append(hp.pages, page) -} - -func (hp *HelpPage) getFiltersText() string { - if len(hp.filters) == 0 { - return "\t\tNo filters defined.\n" - } - - filtersText := strings.Builder{} - gapSize := 4 - - tagMaxWidth := 3 - for _, filter := range hp.filters { - tagMaxWidth = max(tagMaxWidth, len(filter)) - } - filtersText.WriteString("\t\t[yellow::b]Tag") - filtersText.WriteString(strings.Repeat(" ", gapSize)) - filtersText.WriteString("\tValue[-::-]\n\n") - - for _, filter := range hp.filters { - filtersText.WriteString("\t\t") - filtersText.WriteString(filter) - filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize)) - filtersText.WriteString(hp.filterHints[filter]) - filtersText.WriteRune('\n') - } - - return filtersText.String() -} - -func (hp *HelpPage) Draw(screen tcell.Screen) { - x, y, width, height := hp.GetInnerRect() - hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2) - hp.pages[hp.currentPage].Draw(screen) -} - -func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { - return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { - if event.Key() == tcell.KeyEnter { - hp.currentPage++ - hp.currentPage %= len(hp.pages) - return - } - hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {}) - }) -} - -func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) { - return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) { - return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {}) - }) -} diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go deleted file mode 100644 index 471514e5d..000000000 --- a/cmd/frostfs-lens/internal/tui/input.go +++ /dev/null @@ -1,79 +0,0 @@ -package tui - -import ( - "slices" - - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type InputFieldWithHistory struct { - *tview.InputField - history []string - historyLimit int - historyPointer int - currentContent string -} - -func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory { - return &InputFieldWithHistory{ - InputField: tview.NewInputField(), - historyLimit: historyLimit, - } -} - -func (f *InputFieldWithHistory) AddToHistory(s string) { - // Any history change resets the scroll position, so scrolling has to be started over. - defer func() { f.historyPointer = len(f.history) }() - - // If an entry from the history was reused as the search prompt, just move it to the most recent position. - if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { - f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) - f.history = append(f.history, s) - return - } - - if len(f.history) == f.historyLimit { - f.history = f.history[1:] - } - f.history = append(f.history, s) -} - -func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { - return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { - switch event.Key() { - case tcell.KeyDown: - if len(f.history) == 0 { - return - } - // Scrolling down only makes sense after scrolling up has started.
- if f.historyPointer == len(f.history) { - return - } - // Iterate to most recent prompts. - f.historyPointer++ - // Stop iterating over history. - if f.historyPointer == len(f.history) { - f.SetText(f.currentContent) - return - } - f.SetText(f.history[f.historyPointer]) - case tcell.KeyUp: - if len(f.history) == 0 { - return - } - // Start iterating over history. - if f.historyPointer == len(f.history) { - f.currentContent = f.GetText() - } - // End of history. - if f.historyPointer == 0 { - return - } - // Iterate to least recent prompts. - f.historyPointer-- - f.SetText(f.history[f.historyPointer]) - default: - f.InputField.InputHandler()(event, func(tview.Primitive) {}) - } - }) -} diff --git a/cmd/frostfs-lens/internal/tui/loading.go b/cmd/frostfs-lens/internal/tui/loading.go deleted file mode 100644 index 4b9384ad4..000000000 --- a/cmd/frostfs-lens/internal/tui/loading.go +++ /dev/null @@ -1,72 +0,0 @@ -package tui - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type LoadingBar struct { - *tview.Box - view *tview.TextView - secondsElapsed atomic.Int64 - needDrawFunc func() - reset func() -} - -func NewLoadingBar(needDrawFunc func()) *LoadingBar { - b := &LoadingBar{ - Box: tview.NewBox(), - view: tview.NewTextView(), - needDrawFunc: needDrawFunc, - } - b.view.SetBackgroundColor(tview.Styles.PrimaryTextColor) - b.view.SetTextColor(b.GetBackgroundColor()) - - return b -} - -func (b *LoadingBar) Start(ctx context.Context) { - ctx, b.reset = context.WithCancel(ctx) - - go func() { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - b.secondsElapsed.Store(0) - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - b.secondsElapsed.Add(1) - b.needDrawFunc() - } - } - }() -} - -func (b *LoadingBar) Stop() { - b.reset() -} - -func (b *LoadingBar) Draw(screen tcell.Screen) { - seconds := b.secondsElapsed.Load() - - var time string - switch { - case seconds < 60: - time = fmt.Sprintf("%ds", seconds) - default: - time = fmt.Sprintf("%dm%ds", seconds/60, seconds%60) - } - b.view.SetText(fmt.Sprintf(" Loading... 
%s (press Escape to cancel) ", time)) - - x, y, width, _ := b.GetInnerRect() - b.view.SetRect(x, y, width, 1) - b.view.Draw(screen) -} diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go deleted file mode 100644 index a4d392ab3..000000000 --- a/cmd/frostfs-lens/internal/tui/records.go +++ /dev/null @@ -1,268 +0,0 @@ -package tui - -import ( - "context" - "errors" - "fmt" - "math" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" -) - -type updateType int - -const ( - other updateType = iota - moveToPrevPage - moveToNextPage - moveUp - moveDown - moveHome - moveEnd -) - -type RecordsView struct { - *tview.Box - - mu sync.RWMutex - - onUnmount func() - - bucket *Bucket - records []*Record - - buffer chan *Record - - firstRecordIndex int - lastRecordIndex int - selectedRecordIndex int - - updateType updateType - - ui *UI - filter *Filter -} - -func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView { - return &RecordsView{ - Box: tview.NewBox(), - bucket: bucket, - ui: ui, - filter: filter, - } -} - -func (v *RecordsView) Mount(ctx context.Context) error { - if v.onUnmount != nil { - return errors.New("try to mount already mounted component") - } - - ctx, v.onUnmount = context.WithCancel(ctx) - - tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) - - v.buffer = make(chan *Record, v.ui.loadBufferSize) - go func() { - defer close(v.buffer) - - for item := range tempBuffer { - if item.err != nil { - v.ui.stopOnError(item.err) - break - } - record := item.val - - var err error - record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) - if err != nil { - v.ui.stopOnError(err) - break - } - - if v.filter.Apply(record.Entry).Result() != common.Yes { - continue - } - - v.buffer <- record - } - }() - - return nil -} - -func (v *RecordsView) Unmount() { - assert.False(v.onUnmount == nil, "try to unmount not mounted component") - v.onUnmount() - v.onUnmount = nil -} - -func (v *RecordsView) Update(ctx context.Context) error { - _, _, _, recordsPerPage := v.GetInnerRect() - firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes() - -loop: - for len(v.records) < lastRecordIndex { - select { - case <-ctx.Done(): - return nil - case record, ok := <-v.buffer: - if !ok { - break loop - } - v.records = append(v.records, record) - } - } - - // Set the update type to its default value after some specific key event - // has been handled. 
- v.updateType = other - - firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage)) - lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records)) - selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1) - - v.mu.Lock() - v.firstRecordIndex = firstRecordIndex - v.lastRecordIndex = lastRecordIndex - v.selectedRecordIndex = selectedRecordIndex - v.mu.Unlock() - - return nil -} - -func (v *RecordsView) getNewIndexes() (int, int, int) { - v.mu.RLock() - firstRecordIndex := v.firstRecordIndex - lastRecordIndex := v.lastRecordIndex - selectedRecordIndex := v.selectedRecordIndex - v.mu.RUnlock() - - _, _, _, recordsPerPage := v.GetInnerRect() - - switch v.updateType { - case moveUp: - if selectedRecordIndex != firstRecordIndex { - selectedRecordIndex-- - break - } - firstRecordIndex = max(0, firstRecordIndex-1) - lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records)) - selectedRecordIndex = firstRecordIndex - case moveToPrevPage: - if selectedRecordIndex != firstRecordIndex { - selectedRecordIndex = firstRecordIndex - break - } - firstRecordIndex = max(0, firstRecordIndex-recordsPerPage) - lastRecordIndex = firstRecordIndex + recordsPerPage - selectedRecordIndex = firstRecordIndex - case moveDown: - if selectedRecordIndex != lastRecordIndex-1 { - selectedRecordIndex++ - break - } - firstRecordIndex++ - lastRecordIndex++ - selectedRecordIndex++ - case moveToNextPage: - if selectedRecordIndex != lastRecordIndex-1 { - selectedRecordIndex = lastRecordIndex - 1 - break - } - firstRecordIndex += recordsPerPage - lastRecordIndex = firstRecordIndex + recordsPerPage - selectedRecordIndex = lastRecordIndex - 1 - case moveHome: - firstRecordIndex = 0 - lastRecordIndex = firstRecordIndex + recordsPerPage - selectedRecordIndex = 0 - case moveEnd: - lastRecordIndex = math.MaxInt32 - firstRecordIndex = lastRecordIndex - recordsPerPage - selectedRecordIndex = lastRecordIndex - 1 - default: - lastRecordIndex = firstRecordIndex + recordsPerPage - } - - return firstRecordIndex, lastRecordIndex, selectedRecordIndex -} - -func (v *RecordsView) GetInnerRect() (int, int, int, int) { - x, y, width, height := v.Box.GetInnerRect() - - // Left padding. - x = min(x+3, x+width-1) - width = max(width-3, 0) - - return x, y, width, height -} - -func (v *RecordsView) Draw(screen tcell.Screen) { - v.mu.RLock() - firstRecordIndex := v.firstRecordIndex - lastRecordIndex := v.lastRecordIndex - selectedRecordIndex := v.selectedRecordIndex - records := v.records - v.mu.RUnlock() - - v.DrawForSubclass(screen, v) - - x, y, width, height := v.GetInnerRect() - if height == 0 { - return - } - - // No records in that bucket. 
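As a worked example of how the moveEnd overshoot in getNewIndexes interacts with the clamping at the top of Update: with 10 loaded records and a 4-row page, the overshoot first yields astronomically large indexes, which the clamp then snaps back to the real tail (values traced by hand):

	// moveEnd yields: first = MaxInt32-4, last = MaxInt32, selected = MaxInt32-1.
	// Update's clamp then computes:
	first := max(0, min(math.MaxInt32-4, 10-4)) // -> 6
	last := min(first+4, 10)                    // -> 10
	selected := min(math.MaxInt32-1, last-1)    // -> 9, the final record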
- if firstRecordIndex == lastRecordIndex { - tview.Print( - screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor, - ) - return - } - - for index := firstRecordIndex; index < lastRecordIndex; index++ { - result := records[index].Entry - text := result.String() - - if index == selectedRecordIndex { - text = fmt.Sprintf("[:white]%s[:-]", text) - tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor) - } else { - tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor) - } - - y++ - } -} - -func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) { - return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) { - switch m, k := event.Modifiers(), event.Key(); { - case m == 0 && k == tcell.KeyPgUp: - v.updateType = moveToPrevPage - case m == 0 && k == tcell.KeyPgDn: - v.updateType = moveToNextPage - case m == 0 && k == tcell.KeyUp: - v.updateType = moveUp - case m == 0 && k == tcell.KeyDown: - v.updateType = moveDown - case m == 0 && k == tcell.KeyHome: - v.updateType = moveHome - case m == 0 && k == tcell.KeyEnd: - v.updateType = moveEnd - case k == tcell.KeyEnter: - v.mu.RLock() - selectedRecordIndex := v.selectedRecordIndex - records := v.records - v.mu.RUnlock() - if len(records) != 0 { - current := records[selectedRecordIndex] - v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString())) - } - } - }) -} diff --git a/cmd/frostfs-lens/internal/tui/types.go b/cmd/frostfs-lens/internal/tui/types.go deleted file mode 100644 index 4a227fe64..000000000 --- a/cmd/frostfs-lens/internal/tui/types.go +++ /dev/null @@ -1,18 +0,0 @@ -package tui - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" -) - -type Bucket struct { - Name []byte - Path [][]byte - Entry common.SchemaEntry - NextParser common.Parser -} - -type Record struct { - Key, Value []byte - Path [][]byte - Entry common.SchemaEntry -} diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go deleted file mode 100644 index cc6b7859e..000000000 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ /dev/null @@ -1,561 +0,0 @@ -package tui - -import ( - "context" - "errors" - "fmt" - "strings" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "github.com/davecgh/go-spew/spew" - "github.com/gdamore/tcell/v2" - "github.com/rivo/tview" - "go.etcd.io/bbolt" -) - -type Config struct { - LoadBufferSize int - SearchHistorySize int - LoadingIndicatorLag time.Duration -} - -var DefaultConfig = Config{ - LoadBufferSize: 100, - SearchHistorySize: 100, - LoadingIndicatorLag: 500 * time.Millisecond, -} - -type Primitive interface { - tview.Primitive - - Mount(ctx context.Context) error - Update(ctx context.Context) error - Unmount() -} - -type UI struct { - *tview.Box - - // A context is needed while updating pages that read data from a database. - // It should be shared among all mounts and updates. The TUI library in use - // doesn't support contexts at all, so that plumbing is implemented manually here.
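A condensed sketch of the plumbing that comment describes, with names taken from the surrounding code: one session-wide context is created in NewUI and canceled by stop, and every load in Draw derives a child context from it, so Escape can abort an in-flight load without tearing the session down:

	ui.ctx, ui.onStop = context.WithCancel(ctx)   // session-wide, canceled by ui.stop()
	loadCtx, cancel := context.WithCancel(ui.ctx) // one child context per load
	ui.cancelLoading = cancel                     // Escape while loading cancels only this load
	go func() { ui.load(loadCtx) }()              // the session context stays alive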
- //nolint:containedctx - ctx context.Context - onStop func() - - app *tview.Application - db *bbolt.DB - - pageHistory []Primitive - mountedPage Primitive - - pageToMount Primitive - - pageStub tview.Primitive - - infoBar *tview.TextView - searchBar *InputFieldWithHistory - loadingBar *LoadingBar - helpBar *tview.TextView - - helpPage *HelpPage - - searchErrorBar *tview.TextView - - isSearching bool - isLoading atomic.Bool - isShowingError bool - isShowingHelp bool - - loadBufferSize int - - rootParser common.Parser - - loadingIndicatorLag time.Duration - - cancelLoading func() - - filters map[string]func(string) (any, error) - compositeFilters map[string]func(string) (map[string]any, error) - filterHints map[string]string -} - -func NewUI( - ctx context.Context, - app *tview.Application, - db *bbolt.DB, - rootParser common.Parser, - cfg *Config, -) *UI { - spew.Config.DisableMethods = true - - if cfg == nil { - cfg = &DefaultConfig - } - - ui := &UI{ - Box: tview.NewBox(), - - app: app, - db: db, - rootParser: rootParser, - - filters: make(map[string]func(string) (any, error)), - compositeFilters: make(map[string]func(string) (map[string]any, error)), - filterHints: make(map[string]string), - - loadBufferSize: cfg.LoadBufferSize, - loadingIndicatorLag: cfg.LoadingIndicatorLag, - } - - ui.ctx, ui.onStop = context.WithCancel(ctx) - - backgroundColor := ui.GetBackgroundColor() - textColor := tview.Styles.PrimaryTextColor - - inverseBackgroundColor := textColor - inverseTextColor := backgroundColor - - alertTextColor := tcell.ColorRed - - ui.pageStub = tview.NewBox() - - ui.infoBar = tview.NewTextView() - ui.infoBar.SetBackgroundColor(inverseBackgroundColor) - ui.infoBar.SetTextColor(inverseTextColor) - ui.infoBar.SetText( - fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()), - ) - - ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize) - ui.searchBar.SetFieldBackgroundColor(backgroundColor) - ui.searchBar.SetFieldTextColor(textColor) - ui.searchBar.SetLabelColor(textColor) - ui.searchBar.Focus(nil) - ui.searchBar.SetLabel("/") - - ui.searchErrorBar = tview.NewTextView() - ui.searchErrorBar.SetBackgroundColor(backgroundColor) - ui.searchErrorBar.SetTextColor(alertTextColor) - - ui.helpBar = tview.NewTextView() - ui.helpBar.SetBackgroundColor(inverseBackgroundColor) - ui.helpBar.SetTextColor(inverseTextColor) - ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ") - - ui.loadingBar = NewLoadingBar(ui.triggerDraw) - - ui.pageToMount = NewBucketsView(ui, NewFilter(nil)) - - return ui -} - -func (ui *UI) checkFilterExists(typ string) bool { - if _, ok := ui.filters[typ]; ok { - return true - } - if _, ok := ui.compositeFilters[typ]; ok { - return true - } - return false -} - -func (ui *UI) AddFilter( - typ string, - parser func(string) (any, error), - helpHint string, -) error { - if ui.checkFilterExists(typ) { - return fmt.Errorf("filter %s already exists", typ) - } - ui.filters[typ] = parser - ui.filterHints[typ] = helpHint - return nil -} - -func (ui *UI) AddCompositeFilter( - typ string, - parser func(string) (map[string]any, error), - helpHint string, -) error { - if ui.checkFilterExists(typ) { - return fmt.Errorf("filter %s already exists", typ) - } - ui.compositeFilters[typ] = parser - ui.filterHints[typ] = helpHint - return nil -} - -func (ui *UI) stopOnError(err error) { - if err != nil { - ui.onStop() - ui.app.QueueEvent(tcell.NewEventError(err)) - } -} - -func (ui *UI) stop() { - ui.onStop() - ui.app.Stop() -} - -func (ui *UI) movePrevPage() { - 
if len(ui.pageHistory) != 0 { - ui.mountedPage.Unmount() - ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1] - ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1] - ui.triggerDraw() - } -} - -func (ui *UI) moveNextPage(page Primitive) { - ui.pageToMount = page - ui.triggerDraw() -} - -func (ui *UI) triggerDraw() { - go ui.app.QueueUpdateDraw(func() {}) -} - -func (ui *UI) Draw(screen tcell.Screen) { - if ui.isLoading.Load() { - ui.draw(screen) - return - } - - ui.isLoading.Store(true) - - ctx, cancel := context.WithCancel(ui.ctx) - - ready := make(chan struct{}) - go func() { - ui.load(ctx) - - cancel() - close(ready) - ui.isLoading.Store(false) - }() - - select { - case <-ready: - case <-time.After(ui.loadingIndicatorLag): - ui.loadingBar.Start(ui.ctx) - ui.cancelLoading = cancel - - go func() { - <-ready - ui.loadingBar.Stop() - ui.triggerDraw() - }() - } - - ui.draw(screen) -} - -func (ui *UI) load(ctx context.Context) { - if ui.mountedPage == nil && ui.pageToMount == nil { - ui.stop() - return - } - - if ui.pageToMount != nil { - ui.mountAndUpdate(ctx) - } else { - ui.update(ctx) - } -} - -func (ui *UI) draw(screen tcell.Screen) { - ui.DrawForSubclass(screen, ui) - x, y, width, height := ui.GetInnerRect() - - var ( - pageToDraw tview.Primitive - barToDraw tview.Primitive - ) - - switch { - case ui.isShowingHelp: - if ui.helpPage == nil { - var filters []string - for f := range ui.filters { - filters = append(filters, f) - } - for f := range ui.compositeFilters { - filters = append(filters, f) - } - ui.helpPage = NewHelpPage(filters, ui.filterHints) - } - pageToDraw = ui.helpPage - case ui.mountedPage != nil: - pageToDraw = ui.mountedPage - default: - pageToDraw = ui.pageStub - } - - pageToDraw.SetRect(x, y, width, height-1) - pageToDraw.Draw(screen) - - // Search bar uses cursor and we need to hide it when another bar is drawn. - screen.HideCursor() - - switch { - case ui.isLoading.Load(): - barToDraw = ui.loadingBar - case ui.isSearching: - barToDraw = ui.searchBar - case ui.isShowingError: - barToDraw = ui.searchErrorBar - case ui.isShowingHelp: - barToDraw = ui.helpBar - default: - barToDraw = ui.infoBar - } - - barToDraw.SetRect(x, y+height-1, width, 1) - barToDraw.Draw(screen) -} - -func (ui *UI) mountAndUpdate(ctx context.Context) { - defer func() { - // Operation succeeded or was canceled, either way reset page to mount. - ui.pageToMount = nil - }() - - // Mount should use app global context. - //nolint:contextcheck - err := ui.pageToMount.Mount(ui.ctx) - if err != nil { - ui.stopOnError(err) - return - } - - x, y, width, height := ui.GetInnerRect() - ui.pageToMount.SetRect(x, y, width, height-1) - - s := loadOp(ctx, ui.pageToMount.Update) - if s.err != nil { - ui.pageToMount.Unmount() - ui.stopOnError(s.err) - return - } - // Update was canceled. 
- if !s.done { - ui.pageToMount.Unmount() - return - } - - if ui.mountedPage != nil { - ui.pageHistory = append(ui.pageHistory, ui.mountedPage) - } - ui.mountedPage = ui.pageToMount -} - -func (ui *UI) update(ctx context.Context) { - x, y, width, height := ui.GetInnerRect() - ui.mountedPage.SetRect(x, y, width, height-1) - - s := loadOp(ctx, ui.mountedPage.Update) - if s.err != nil { - ui.stopOnError(s.err) - return - } -} - -type status struct { - done bool - err error -} - -func loadOp(ctx context.Context, op func(ctx context.Context) error) status { - errCh := make(chan error, 1) // buffered so the goroutine can't block (and leak) if the context is canceled first - go func() { - errCh <- op(ctx) - }() - - select { - case <-ctx.Done(): - return status{done: false, err: nil} - case err := <-errCh: - return status{done: true, err: err} - } -} - -func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) { - return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) { - switch { - case ui.isLoading.Load(): - ui.handleInputOnLoading(event) - case ui.isShowingHelp: - ui.handleInputOnShowingHelp(event) - case ui.isShowingError: - ui.handleInputOnShowingError() - case ui.isSearching: - ui.handleInputOnSearching(event) - default: - ui.handleInput(event) - } - }) -} - -func (ui *UI) handleInput(event *tcell.EventKey) { - m, k, r := event.Modifiers(), event.Key(), event.Rune() - - switch { - case k == tcell.KeyEsc: - ui.movePrevPage() - case m == 0 && k == tcell.KeyRune && r == 'h': - ui.isShowingHelp = true - case m == 0 && k == tcell.KeyRune && r == '/': - ui.isSearching = true - case m == 0 && k == tcell.KeyRune && r == 'q': - ui.stop() - default: - if ui.mountedPage != nil { - ui.mountedPage.InputHandler()(event, func(tview.Primitive) {}) - } - } -} - -func (ui *UI) handleInputOnLoading(event *tcell.EventKey) { - switch k, r := event.Key(), event.Rune(); { - case k == tcell.KeyEsc: - ui.cancelLoading() - case k == tcell.KeyRune && r == 'q': - ui.stop() - } -} - -func (ui *UI) handleInputOnShowingError() { - ui.isShowingError = false - ui.isSearching = true -} - -func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) { - k, r := event.Key(), event.Rune() - - switch { - case k == tcell.KeyEsc: - ui.isShowingHelp = false - case k == tcell.KeyRune && r == 'q': - ui.stop() - default: - ui.helpPage.InputHandler()(event, func(tview.Primitive) {}) - } -} - -func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { - m, k := event.Modifiers(), event.Key() - - switch { - case k == tcell.KeyEnter: - prompt := ui.searchBar.GetText() - - res, err := ui.processPrompt(prompt) - if err != nil { - ui.isShowingError = true - ui.isSearching = false - ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)") - return - } - - switch v := ui.mountedPage.(type) { - case *BucketsView: - ui.moveNextPage(NewBucketsView(ui, res)) - case *RecordsView: - bucket := v.bucket - ui.moveNextPage(NewRecordsView(ui, bucket, res)) - } - - if ui.searchBar.GetText() != "" { - ui.searchBar.AddToHistory(ui.searchBar.GetText()) - } - - ui.searchBar.SetText("") - ui.isSearching = false - case k == tcell.KeyEsc: - ui.isSearching = false - case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0: - ui.isSearching = false - default: - ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) - } - - ui.MouseHandler() -} - -func (ui *UI) WithPrompt(prompt string) error { - filter, err := ui.processPrompt(prompt) - if err != nil { - return err - } - - ui.pageToMount = NewBucketsView(ui,
filter) - - if prompt != "" { - ui.searchBar.AddToHistory(prompt) - } - - return nil -} - -func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) { - if prompt == "" { - return NewFilter(nil), nil - } - - filterMap := make(map[string]any) - - for _, filterString := range strings.Split(prompt, "+") { - parts := strings.Split(filterString, ":") - if len(parts) != 2 { - return nil, errors.New("expected 'tag:value [+ tag:value]...'") - } - - filterTag := strings.TrimSpace(parts[0]) - filterValueString := strings.TrimSpace(parts[1]) - - if _, exists := filterMap[filterTag]; exists { - return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag) - } - - parser, ok := ui.filters[filterTag] - if ok { - filterValue, err := parser(filterValueString) - if err != nil { - return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err) - } - - filterMap[filterTag] = filterValue - continue - } - - compositeParser, ok := ui.compositeFilters[filterTag] - if ok { - compositeFilterValue, err := compositeParser(filterValueString) - if err != nil { - return nil, fmt.Errorf( - "can't parse '%s' filter value '%s': %w", - filterTag, filterValueString, err, - ) - } - - for tag, value := range compositeFilterValue { - if _, exists := filterMap[tag]; exists { - return nil, fmt.Errorf( - "found duplicate filter tag '%s' while processing composite filter with tag '%s'", - tag, filterTag, - ) - } - - filterMap[tag] = value - } - continue - } - - return nil, fmt.Errorf("unknown filter tag '%s'", filterTag) - } - - return NewFilter(filterMap), nil -} diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go deleted file mode 100644 index 2d1ab3e33..000000000 --- a/cmd/frostfs-lens/internal/tui/util.go +++ /dev/null @@ -1,110 +0,0 @@ -package tui - -import ( - "errors" - "strings" - "time" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" - "go.etcd.io/bbolt" -) - -func OpenDB(path string, writable bool) (*bbolt.DB, error) { - db, err := bbolt.Open(path, 0o600, &bbolt.Options{ - ReadOnly: !writable, - Timeout: 100 * time.Millisecond, - }) - if err != nil { - return nil, err - } - return db, nil -} - -func CIDParser(s string) (any, error) { - data, err := base58.Decode(s) - if err != nil { - return nil, err - } - var id cid.ID - if err = id.Decode(data); err != nil { - return nil, err - } - return id, nil -} - -func OIDParser(s string) (any, error) { - data, err := base58.Decode(s) - if err != nil { - return nil, err - } - var id oid.ID - if err = id.Decode(data); err != nil { - return nil, err - } - return id, nil -} - -func AddressParser(s string) (map[string]any, error) { - m := make(map[string]any) - - parts := strings.Split(s, "/") - if len(parts) != 2 { - return nil, errors.New("expected <CID>/<OID>") - } - cnr, err := CIDParser(parts[0]) - if err != nil { - return nil, err - } - obj, err := OIDParser(parts[1]) - if err != nil { - return nil, err - } - - m["cid"] = cnr - m["oid"] = obj - - return m, nil -} - -func keyParser(s string) (any, error) { - if s == "" { - return nil, errors.New("empty attribute key") - } - return s, nil -} - -func valueParser(s string) (any, error) { - if s == "" { - return nil, errors.New("empty attribute value") - } - return s, nil -} - -func AttributeParser(s string) (map[string]any, error) { - m := make(map[string]any) - - parts := strings.Split(s, "/") - if len(parts) != 1 && len(parts) != 2 { - return nil,
errors.New("expected or /") - } - - key, err := keyParser(parts[0]) - if err != nil { - return nil, err - } - m["key"] = key - - if len(parts) == 1 { - return m, nil - } - - value, err := valueParser(parts[1]) - if err != nil { - return nil, err - } - m["value"] = value - - return m, nil -} diff --git a/cmd/frostfs-lens/internal/writecache/inspect.go b/cmd/frostfs-lens/internal/writecache/inspect.go deleted file mode 100644 index afc986c8b..000000000 --- a/cmd/frostfs-lens/internal/writecache/inspect.go +++ /dev/null @@ -1,40 +0,0 @@ -package writecache - -import ( - "os" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/spf13/cobra" -) - -var inspectCMD = &cobra.Command{ - Use: "inspect", - Short: "Object inspection", - Long: `Inspect specific object in a write-cache.`, - Run: inspectFunc, -} - -func init() { - common.AddAddressFlag(inspectCMD, &vAddress) - common.AddComponentPathFlag(inspectCMD, &vPath) - common.AddOutputFileFlag(inspectCMD, &vOut) -} - -func inspectFunc(cmd *cobra.Command, _ []string) { - var data []byte - - db, err := writecache.OpenDB(vPath, true, os.OpenFile) - common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err)) - defer db.Close() - - data, err = writecache.Get(db, []byte(vAddress)) - common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err)) - - var o objectSDK.Object - common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", o.Unmarshal(data))) - - common.PrintObjectHeader(cmd, o) - common.WriteObjectToFile(cmd, vOut, data) -} diff --git a/cmd/frostfs-lens/internal/writecache/list.go b/cmd/frostfs-lens/internal/writecache/list.go deleted file mode 100644 index bcbae0ec9..000000000 --- a/cmd/frostfs-lens/internal/writecache/list.go +++ /dev/null @@ -1,40 +0,0 @@ -package writecache - -import ( - "fmt" - "io" - "os" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var listCMD = &cobra.Command{ - Use: "inspect", - Short: "Object inspection", - Long: `Inspect specific object in a write-cache.`, - Run: listFunc, -} - -func init() { - common.AddComponentPathFlag(listCMD, &vPath) -} - -func listFunc(cmd *cobra.Command, _ []string) { - // other targets can be supported - w := cmd.OutOrStderr() - - wAddr := func(addr oid.Address) error { - _, err := io.WriteString(w, fmt.Sprintf("%s\n", addr)) - return err - } - - db, err := writecache.OpenDB(vPath, true, os.OpenFile) - common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err)) - defer db.Close() - - err = writecache.IterateDB(db, wAddr) - common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err)) -} diff --git a/cmd/frostfs-lens/internal/writecache/root.go b/cmd/frostfs-lens/internal/writecache/root.go deleted file mode 100644 index d7d6db240..000000000 --- a/cmd/frostfs-lens/internal/writecache/root.go +++ /dev/null @@ -1,21 +0,0 @@ -package writecache - -import ( - "github.com/spf13/cobra" -) - -var ( - vAddress string - vPath string - vOut string -) - -// Root contains `write-cache` command definition. 
-var Root = &cobra.Command{ - Use: "write-cache", - Short: "Operations with write-cache", -} - -func init() { - Root.AddCommand(listCMD, inspectCMD, tuiCMD) -} diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go deleted file mode 100644 index b7e4d7c96..000000000 --- a/cmd/frostfs-lens/internal/writecache/tui.go +++ /dev/null @@ -1,68 +0,0 @@ -package writecache - -import ( - "context" - "fmt" - - common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" - schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" - "github.com/rivo/tview" - "github.com/spf13/cobra" -) - -var tuiCMD = &cobra.Command{ - Use: "explore", - Short: "Write cache exploration with a terminal UI", - Long: `Launch a terminal UI to explore write cache and search for data. - -Available search filters: -- cid CID -- oid OID -- addr CID/OID -`, - Run: tuiFunc, -} - -var initialPrompt string - -func init() { - common.AddComponentPathFlag(tuiCMD, &vPath) - - tuiCMD.Flags().StringVar( - &initialPrompt, - "filter", - "", - "Filter prompt to start with, format 'tag:value [+ tag:value]...'", - ) -} - -func tuiFunc(cmd *cobra.Command, _ []string) { - common.ExitOnErr(cmd, runTUI(cmd)) -} - -func runTUI(cmd *cobra.Command) error { - db, err := tui.OpenDB(vPath, false) - if err != nil { - return fmt.Errorf("couldn't open database: %w", err) - } - defer db.Close() - - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() - - app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, schema.WritecacheParser, nil) - - _ = ui.AddFilter("cid", tui.CIDParser, "CID") - _ = ui.AddFilter("oid", tui.OIDParser, "OID") - _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID") - - err = ui.WithPrompt(initialPrompt) - if err != nil { - return fmt.Errorf("invalid filter prompt: %w", err) - } - - app.SetRoot(ui, true).SetFocus(ui) - return app.Run() -} diff --git a/cmd/frostfs-lens/root.go b/cmd/frostfs-lens/root.go deleted file mode 100644 index 96ade802c..000000000 --- a/cmd/frostfs-lens/root.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/meta" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc" - "github.com/spf13/cobra" -) - -var command = &cobra.Command{ - Use: "frostfs-lens", - Short: "FrostFS Storage Engine Lens", - Long: `FrostFS Storage Engine Lens provides tools to browse the contents of the FrostFS storage engine.`, - RunE: entryPoint, - SilenceUsage: true, -} - -func entryPoint(cmd *cobra.Command, _ []string) error { - printVersion, _ := cmd.Flags().GetBool("version") - if printVersion { - cmd.Print(misc.BuildInfo("FrostFS Lens")) - - return nil - } - - return cmd.Usage() -} - -func init() { - // use stdout as default output for cmd.Print() - command.SetOut(os.Stdout) - command.Flags().Bool("version", false, "Application version") - command.AddCommand( - blobovnicza.Root, - meta.Root, - writecache.Root, - gendoc.Command(command, gendoc.Options{}), - ) -} - -func main() { - err := command.Execute() - if err != nil { - os.Exit(1) - } -} diff --git a/cmd/frostfs-node/accounting.go 
b/cmd/frostfs-node/accounting.go deleted file mode 100644 index 2d52e0c56..000000000 --- a/cmd/frostfs-node/accounting.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "context" - "net" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc" - accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" - accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" - "google.golang.org/grpc" -) - -func initAccountingService(ctx context.Context, c *cfg) { - c.initMorphComponents(ctx) - - balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0) - fatalOnErr(err) - - server := accountingTransportGRPC.New( - accountingService.NewSignService( - &c.key.PrivateKey, - accountingService.NewExecutionService( - accounting.NewExecutor(balanceMorphWrapper), - c.respSvc, - ), - ), - ) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - accountingGRPC.RegisterAccountingServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. - s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server) - }) -} - -// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support. -func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc { - sdLegacy := new(grpc.ServiceDesc) - *sdLegacy = sd - - const ( - legacyNamespace = "neo.fs.v2" - apemanagerLegacyNamespace = "frostfs.v2" - newNamespace = "frost.fs" - ) - - if strings.HasPrefix(sd.ServiceName, legacyNamespace) { - sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace) - } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) { - sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace) - } - return sdLegacy -} diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go deleted file mode 100644 index 513314712..000000000 --- a/cmd/frostfs-node/apemanager.go +++ /dev/null @@ -1,34 +0,0 @@ -package main - -import ( - "net" - - ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" - morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" - "google.golang.org/grpc" -) - -func initAPEManagerService(c *cfg) { - contractStorage := ape_contract.NewProxyVerificationContractStorage( - morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.key, - c.cfgMorph.proxyScriptHash, - c.cfgObject.cfgAccessPolicyEngine.policyContractHash) - - execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, - c.cfgMorph.client, - apemanager.WithLogger(c.log)) - sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) - auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) - server := apemanager_transport.New(auditSvc) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - apemanager_grpc.RegisterAPEManagerServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. 
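To make the namespace rewrite in frostFSServiceDesc concrete, the two prefix branches map full gRPC service names as follows (derived from the constants above; the exact names come from the generated service descriptors):

	// "neo.fs.v2.accounting.AccountingService"  -> "frost.fs.accounting.AccountingService"
	// "frostfs.v2.apemanager.APEManagerService" -> "frost.fs.apemanager.APEManagerService"

Both the original and the rewritten descriptors are registered, so clients speaking either namespace reach the same handler.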
- s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server) - }) -} diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go deleted file mode 100644 index ce8ae9662..000000000 --- a/cmd/frostfs-node/attributes.go +++ /dev/null @@ -1,10 +0,0 @@ -package main - -import ( - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/attributes" -) - -func parseAttributes(c *cfg) { - fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg))) -} diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go deleted file mode 100644 index 41841417b..000000000 --- a/cmd/frostfs-node/cache.go +++ /dev/null @@ -1,480 +0,0 @@ -package main - -import ( - "bytes" - "cmp" - "context" - "slices" - "sync" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/hashicorp/golang-lru/v2/simplelru" - "go.uber.org/zap" -) - -type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) - -type valueWithError[V any] struct { - v V - // the error is cached so that a failed request is not repeated for some time - e error -} - -// ttlNetCache provides a TTL cache interface over a network value reader. -type ttlNetCache[K comparable, V any] struct { - cache *expirable.LRU[K, *valueWithError[V]] - netRdr netValueReader[K, V] - keyLocker *utilSync.KeyLocker[K] - metrics cacheMetrics -} - -// newNetworkTTLCache wraps a netValueReader with a TTL caching mechanism. -func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V], metrics cacheMetrics) *ttlNetCache[K, V] { - cache := expirable.NewLRU[K, *valueWithError[V]](sz, nil, ttl) - - return &ttlNetCache[K, V]{ - cache: cache, - netRdr: netRdr, - metrics: metrics, - keyLocker: utilSync.NewKeyLocker[K](), - } -} - -// get reads the value by key. - // - // The value is updated from the network on a cache miss or after TTL expiry. - // - // The returned value should not be modified.
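The method body that follows is a double-checked read: a lock-free Peek first, then the same Peek again under a per-key lock, so concurrent misses for one key collapse into a single network request. Its shape, stripped of the metrics bookkeeping (a sketch; cache, keyLocker, and netRdr come from the struct above):

	if val, ok := cache.Peek(key); ok {
		return val.v, val.e // fast path, no locking
	}
	keyLocker.Lock(key)
	defer keyLocker.Unlock(key)
	if val, ok := cache.Peek(key); ok {
		return val.v, val.e // filled in by a concurrent caller meanwhile
	}
	v, err := netRdr(ctx, key) // at most one fetch per missing key
	cache.Add(key, &valueWithError[V]{v: v, e: err})
	return v, err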
-func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - val, ok := c.cache.Peek(key) - if ok { - hit = true - return val.v, val.e - } - - c.keyLocker.Lock(key) - defer c.keyLocker.Unlock(key) - - val, ok = c.cache.Peek(key) - if ok { - hit = true - return val.v, val.e - } - - v, err := c.netRdr(ctx, key) - - c.cache.Add(key, &valueWithError[V]{ - v: v, - e: err, - }) - - return v, err -} - -func (c *ttlNetCache[K, V]) set(k K, v V, e error) { - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Set", time.Since(startedAt), false) - }() - - c.keyLocker.Lock(k) - defer c.keyLocker.Unlock(k) - - c.cache.Add(k, &valueWithError[V]{ - v: v, - e: e, - }) -} - -func (c *ttlNetCache[K, V]) remove(key K) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Remove", time.Since(startedAt), hit) - }() - - c.keyLocker.Lock(key) - defer c.keyLocker.Unlock(key) - - hit = c.cache.Remove(key) -} - -// wrapper over TTL cache of values read from the network -// that implements container storage. -type ttlContainerStorage struct { - containerCache *ttlNetCache[cid.ID, *container.Container] - delInfoCache *ttlNetCache[cid.ID, *container.DelInfo] -} - -func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { - lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { - return v.Get(ctx, id) - }, metrics.NewCacheMetrics("container")) - lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return v.DeletionInfo(ctx, id) - }, metrics.NewCacheMetrics("container_deletion_info")) - - return ttlContainerStorage{ - containerCache: lruCnrCache, - delInfoCache: lruDelInfoCache, - } -} - -func (s ttlContainerStorage) handleRemoval(cnr cid.ID) { - s.containerCache.set(cnr, nil, new(apistatus.ContainerNotFound)) - - // The removal invalidates possibly stored error response. - s.delInfoCache.remove(cnr) -} - -// Get returns container value from the cache. If value is missing in the cache -// or expired, then it returns value from side chain and updates the cache. 
-func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
-	return s.containerCache.get(ctx, cnr)
-}
-
-func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
-	return s.delInfoCache.get(ctx, cnr)
-}
-
-type lruNetmapSource struct {
-	netState netmap.State
-
-	client     rawSource
-	cache      *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
-	mtx        sync.RWMutex
-	metrics    cacheMetrics
-	log        *logger.Logger
-	candidates atomic.Pointer[[]netmapSDK.NodeInfo]
-}
-
-type rawSource interface {
-	GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
-	GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
-}
-
-func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
-	netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
-) netmap.Source {
-	const netmapCacheSize = 10
-
-	cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
-	fatalOnErr(err)
-
-	src := &lruNetmapSource{
-		netState: netState,
-		client:   client,
-		cache:    cache,
-		log:      log,
-		metrics:  metrics.NewCacheMetrics("netmap"),
-	}
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		src.updateCandidates(ctx, d)
-	}()
-
-	return src
-}
-
-// updateCandidates periodically merges the cached netmaps with the candidates list.
-func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
-	timer := time.NewTimer(d)
-	defer timer.Stop()
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-timer.C:
-			newCandidates, err := s.client.GetCandidates(ctx)
-			if err != nil {
-				s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
-				timer.Reset(d)
-				break
-			}
-			if len(newCandidates) == 0 {
-				s.candidates.Store(&newCandidates)
-				timer.Reset(d)
-				break
-			}
-			slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
-				return cmp.Compare(n1.Hash(), n2.Hash())
-			})
-
-			// Check whether the candidates state has changed since the last poll
-			v := s.candidates.Load()
-			if v == nil {
-				s.candidates.Store(&newCandidates)
-				s.mergeCacheWithCandidates(newCandidates)
-				timer.Reset(d)
-				break
-			}
-			ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
-				if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
-					uint32(n1.Status()) != uint32(n2.Status()) ||
-					slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
-					return 1
-				}
-				ne1 := slices.Collect(n1.NetworkEndpoints())
-				ne2 := slices.Collect(n2.NetworkEndpoints())
-				return slices.Compare(ne1, ne2)
-			})
-			if ret != 0 {
-				s.candidates.Store(&newCandidates)
-				s.mergeCacheWithCandidates(newCandidates)
-			}
-			timer.Reset(d)
-		}
-	}
-}
-
-func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
-	s.mtx.RLock()
-	tmp := s.cache.Values()
-	s.mtx.RUnlock()
-	for _, pointer := range tmp {
-		nm := pointer.Load()
-		updates := getNetMapNodesToUpdate(nm, candidates)
-		if len(updates) > 0 {
-			nm = nm.Clone()
-			mergeNetmapWithCandidates(updates, nm)
-			pointer.Store(nm)
-		}
-	}
}
-
-// reads value by the key.
-//
-// updates the value from the network on cache miss.
-//
-// returned value should not be modified.
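-//
-// Values are keyed by epoch. On a miss the map is fetched via the raw
-// source's GetNetMapByEpoch and, if a candidates snapshot is available,
-// node statuses, external addresses and endpoints are merged in before
-// the map is cached (see mergeNetmapWithCandidates and
-// getNetMapNodesToUpdate below).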
-func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - s.mtx.Lock() - defer s.mtx.Unlock() - val, ok := s.cache.Get(key) - if ok { - hit = true - return val.Load(), nil - } - - nm, err := s.client.GetNetMapByEpoch(ctx, key) - if err != nil { - return nil, err - } - v := s.candidates.Load() - if v != nil { - updates := getNetMapNodesToUpdate(nm, *v) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - } - - p := atomic.Pointer[netmapSDK.NetMap]{} - p.Store(nm) - s.cache.Add(key, &p) - - return nm, nil -} - -// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. -func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { - for _, v := range updates { - if v.status != netmapSDK.UnspecifiedState { - nm.Nodes()[v.netmapIndex].SetStatus(v.status) - } - if v.externalAddresses != nil { - nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) - } - if v.endpoints != nil { - nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) - } - } -} - -type nodeToUpdate struct { - netmapIndex int - status netmapSDK.NodeState - externalAddresses []string - endpoints []string -} - -// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. -func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { - var res []nodeToUpdate - for i := range nm.Nodes() { - for _, cnd := range candidates { - if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { - var tmp nodeToUpdate - var update bool - - if cnd.Status() != nm.Nodes()[i].Status() && - (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { - update = true - tmp.status = cnd.Status() - } - - externalAddresses := cnd.ExternalAddresses() - if externalAddresses != nil && - slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { - update = true - tmp.externalAddresses = externalAddresses - } - - nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) - nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) - candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) - candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) - if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { - update = true - tmp.endpoints = candidateEndpoints - } - - if update { - tmp.netmapIndex = i - res = append(res, tmp) - } - - break - } - } - } - return res -} - -func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) -} - -func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(ctx, epoch) -} - -func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.get(ctx, epoch) - if err != nil { - return nil, err - } - - return val, nil -} - -func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { - return s.netState.CurrentEpoch(), nil -} - -type cachedIRFetcher struct { - *ttlNetCache[struct{}, [][]byte] -} - -func newCachedIRFetcher(f interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) -}, -) cachedIRFetcher { - const ( - 
irFetcherCacheSize = 1 // we intend to store only one value - - // Without the cache in the testnet we can see several hundred simultaneous - // requests (frostfs-node #1278), so limiting the request rate solves the issue. - // - // Exact request rate doesn't really matter because Inner Ring list update - // happens extremely rare, but there is no side chain events for that as - // for now (frostfs-contract v0.15.0 notary disabled env) to monitor it. - irFetcherCacheTTL = 30 * time.Second - ) - - irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, - func(ctx context.Context, _ struct{}) ([][]byte, error) { - return f.InnerRingKeys(ctx) - }, metrics.NewCacheMetrics("ir_keys"), - ) - - return cachedIRFetcher{irFetcherCache} -} - -// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in -// the cache or expired, then it returns keys from side chain and updates -// the cache. -func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { - val, err := f.get(ctx, struct{}{}) - if err != nil { - return nil, err - } - - return val, nil -} - -type ttlMaxObjectSizeCache struct { - mtx sync.RWMutex - lastUpdated time.Time - lastSize uint64 - src objectwriter.MaxSizeSource - metrics cacheMetrics -} - -func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.MaxSizeSource { - return &ttlMaxObjectSizeCache{ - src: src, - metrics: metrics.NewCacheMetrics("max_object_size"), - } -} - -func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { - const ttl = time.Second * 30 - - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - c.mtx.RLock() - prevUpdated := c.lastUpdated - size := c.lastSize - c.mtx.RUnlock() - - if time.Since(prevUpdated) < ttl { - hit = true - return size - } - - c.mtx.Lock() - size = c.lastSize - if !c.lastUpdated.After(prevUpdated) { - size = c.src.MaxObjectSize(ctx) - c.lastSize = size - c.lastUpdated = time.Now() - } - c.mtx.Unlock() - - return size -} - -type cacheMetrics interface { - AddMethodDuration(method string, d time.Duration, hit bool) -} diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go deleted file mode 100644 index 24286826f..000000000 --- a/cmd/frostfs-node/cache_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package main - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/stretchr/testify/require" -) - -func TestTTLNetCache(t *testing.T) { - ttlDuration := time.Millisecond * 50 - cache := newNetworkTTLCache(10, ttlDuration, testNetValueReader, &noopCacheMetricts{}) - - key := "key" - - t.Run("Test Add and Get", func(t *testing.T) { - ti := time.Now() - cache.set(key, ti, nil) - val, err := cache.get(context.Background(), key) - require.NoError(t, err) - require.Equal(t, ti, val) - }) - - t.Run("Test TTL", func(t *testing.T) { - ti := time.Now() - cache.set(key, ti, nil) - time.Sleep(2 * ttlDuration) - val, err := cache.get(context.Background(), key) - require.NoError(t, err) - require.NotEqual(t, val, ti) - }) - - t.Run("Test Remove", func(t *testing.T) { - ti := time.Now() - cache.set(key, ti, nil) - cache.remove(key) - val, err := cache.get(context.Background(), key) - require.NoError(t, err) - require.NotEqual(t, val, ti) - }) - - t.Run("Test Cache Error", func(t *testing.T) { - cache.set("error", time.Now(), errors.New("mock error")) - _, err := 
cache.get(context.Background(), "error") - require.Error(t, err) - require.Equal(t, "mock error", err.Error()) - }) -} - -func testNetValueReader(_ context.Context, key string) (time.Time, error) { - if key == "error" { - return time.Now(), errors.New("mock error") - } - return time.Now(), nil -} - -type noopCacheMetricts struct{} - -func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} - -type rawSrc struct{} - -func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { - node0 := netmapSDK.NodeInfo{} - node0.SetPublicKey([]byte{byte(1)}) - node0.SetStatus(netmapSDK.Online) - node0.SetExternalAddresses("1", "0") - node0.SetNetworkEndpoints("1", "0") - - node1 := netmapSDK.NodeInfo{} - node1.SetPublicKey([]byte{byte(1)}) - node1.SetStatus(netmapSDK.Online) - node1.SetExternalAddresses("1", "0") - node1.SetNetworkEndpoints("1", "0") - - return []netmapSDK.NodeInfo{node0, node1}, nil -} - -func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - nm := netmapSDK.NetMap{} - nm.SetEpoch(1) - - node0 := netmapSDK.NodeInfo{} - node0.SetPublicKey([]byte{byte(1)}) - node0.SetStatus(netmapSDK.Maintenance) - node0.SetExternalAddresses("0") - node0.SetNetworkEndpoints("0") - - node1 := netmapSDK.NodeInfo{} - node1.SetPublicKey([]byte{byte(1)}) - node1.SetStatus(netmapSDK.Maintenance) - node1.SetExternalAddresses("0") - node1.SetNetworkEndpoints("0") - - nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) - - return &nm, nil -} - -type st struct{} - -func (s *st) CurrentEpoch() uint64 { - return 1 -} - -func TestNetmapStorage(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - wg := sync.WaitGroup{} - cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) - - nm, err := cache.GetNetMapByEpoch(ctx, 1) - require.NoError(t, err) - require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) - require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) - require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) - - require.Eventually(t, func() bool { - nm, err := cache.GetNetMapByEpoch(ctx, 1) - require.NoError(t, err) - for _, node := range nm.Nodes() { - if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && - node.NumberOfNetworkEndpoints() == 2) { - return false - } - } - return true - }, time.Second*5, time.Millisecond*10) - - cancel() - wg.Wait() -} diff --git a/cmd/frostfs-node/closer.go b/cmd/frostfs-node/closer.go deleted file mode 100644 index b370f56f9..000000000 --- a/cmd/frostfs-node/closer.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -type closer struct { - name string - fn func() -} - -func getCloser(c *cfg, name string) *closer { - for _, clsr := range c.closers { - if clsr.name == name { - return &clsr - } - } - return nil -} - -func delCloser(c *cfg, name string) { - for i, clsr := range c.closers { - if clsr.name == name { - c.closers[i] = c.closers[len(c.closers)-1] - c.closers = c.closers[:len(c.closers)-1] - return - } - } -} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go deleted file mode 100644 index 83a9b4d4c..000000000 --- a/cmd/frostfs-node/config.go +++ /dev/null @@ -1,1479 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "io/fs" - "net" - "os" - "os/signal" - "path/filepath" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - apiclientconfig 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit" - contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts" - engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" - shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" - blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" - fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" - loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" - replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" - tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" - treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - lsmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - shardmode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - netmap2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - getsvc 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone" - tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" - "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - policy_client "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - neogoutil "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" - "go.etcd.io/bbolt" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -const addressSize = 72 // 32 bytes object ID, 32 bytes container ID, 8 bytes protobuf encoding - -const maxMsgSize = 4 << 20 // transport msg limit 4 MiB - -// capacity of the pools of the morph notification handlers -// for each contract listener. -const notificationHandlerPoolSize = 10 - -// applicationConfiguration reads and stores component-specific configuration -// values. It should not store any application helpers structs (pointers to shared -// structs). -// It must not be used concurrently. -type applicationConfiguration struct { - // _read indicated whether a config - // has already been read - _read bool - - LoggerCfg struct { - level string - destination string - timestamp bool - options []zap.Option - tags [][]string - } - - ObjectCfg struct { - tombstoneLifetime uint64 - priorityMetrics []placement.Metric - } - - EngineCfg struct { - errorThreshold uint32 - shards []shardCfg - lowMem bool - } - - // if need to run node in compatibility with other versions mode - cmode *atomic.Bool -} - -type shardCfg struct { - compression compression.Config - - smallSizeObjectLimit uint64 - refillMetabase bool - refillMetabaseWorkersCount int - mode shardmode.Mode - limiter qos.Limiter - - metaCfg struct { - path string - perm fs.FileMode - maxBatchSize int - maxBatchDelay time.Duration - } - - subStorages []subStorageCfg - - gcCfg struct { - removerBatchSize int - removerSleepInterval time.Duration - expiredCollectorBatchSize int - expiredCollectorWorkerCount int - } - - writecacheCfg struct { - enabled bool - path string - maxObjSize uint64 - flushWorkerCount int - sizeLimit uint64 - countLimit uint64 - noSync bool - flushSizeLimit uint64 - } - - piloramaCfg struct { - enabled bool - path string - perm fs.FileMode - noSync bool - maxBatchSize int - maxBatchDelay time.Duration - } -} - -// id returns persistent id of a shard. 
It is different from the ID used in runtime -// and is primarily used to identify shards in the configuration. -func (c *shardCfg) id() string { - // This calculation should be kept in sync with - // pkg/local_object_storage/engine/control.go file. - var sb strings.Builder - for i := range c.subStorages { - sb.WriteString(filepath.Clean(c.subStorages[i].path)) - } - return sb.String() -} - -type subStorageCfg struct { - // common for all storages - typ string - path string - perm fs.FileMode - depth uint64 - noSync bool - - // blobovnicza-specific - size uint64 - width uint64 - openedCacheSize int - initWorkerCount int - rebuildDropTimeout time.Duration - openedCacheTTL time.Duration - openedCacheExpInterval time.Duration -} - -// readConfig fills applicationConfiguration with raw configuration values -// not modifying them. -func (a *applicationConfiguration) readConfig(c *config.Config) error { - if a._read { - err := c.Reload() - if err != nil { - return fmt.Errorf("could not reload configuration: %w", err) - } - - err = validateConfig(c) - if err != nil { - return fmt.Errorf("configuration's validation: %w", err) - } - - // clear if it is rereading - cmode := a.cmode - *a = applicationConfiguration{} - a.cmode = cmode - } - - a._read = true - a.cmode.Store(nodeconfig.CompatibilityMode(c)) - - // Logger - - a.LoggerCfg.level = loggerconfig.Level(c) - a.LoggerCfg.destination = loggerconfig.Destination(c) - a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) - var opts []zap.Option - if loggerconfig.ToLokiConfig(c).Enabled { - opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) - return lokiCore - })} - } - a.LoggerCfg.options = opts - a.LoggerCfg.tags = loggerconfig.Tags(c) - - // Object - - a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) - locodeDBPath := nodeconfig.LocodeDBPath(c) - parser, err := placement.NewMetricsParser(locodeDBPath) - if err != nil { - return fmt.Errorf("metrics parser creation: %w", err) - } - m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) - if err != nil { - return fmt.Errorf("parse metrics: %w", err) - } - a.ObjectCfg.priorityMetrics = m - - // Storage Engine - - a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) - a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) - - return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) -} - -func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { - var target shardCfg - - target.refillMetabase = source.RefillMetabase() - target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() - target.mode = source.Mode() - target.compression = source.Compression() - target.smallSizeObjectLimit = source.SmallSizeLimit() - - a.setShardWriteCacheConfig(&target, source) - - a.setShardPiloramaConfig(c, &target, source) - - if err := a.setShardStorageConfig(&target, source); err != nil { - return err - } - - a.setMetabaseConfig(&target, source) - - a.setGCConfig(&target, source) - if err := a.setLimiter(&target, source); err != nil { - return err - } - - a.EngineCfg.shards = append(a.EngineCfg.shards, target) - - return nil -} - -func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { - writeCacheCfg := source.WriteCache() - if writeCacheCfg.Enabled() { - wc := &target.writecacheCfg - - wc.enabled = true - wc.path = 
writeCacheCfg.Path() - wc.maxObjSize = writeCacheCfg.MaxObjectSize() - wc.flushWorkerCount = writeCacheCfg.WorkerCount() - wc.sizeLimit = writeCacheCfg.SizeLimit() - wc.countLimit = writeCacheCfg.CountLimit() - wc.noSync = writeCacheCfg.NoSync() - wc.flushSizeLimit = writeCacheCfg.MaxFlushingObjectsSize() - } -} - -func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { - if config.BoolSafe(c.Sub("tree"), "enabled") { - piloramaCfg := source.Pilorama() - pr := &target.piloramaCfg - - pr.enabled = true - pr.path = piloramaCfg.Path() - pr.perm = piloramaCfg.Perm() - pr.noSync = piloramaCfg.NoSync() - pr.maxBatchSize = piloramaCfg.MaxBatchSize() - pr.maxBatchDelay = piloramaCfg.MaxBatchDelay() - } -} - -func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { - blobStorCfg := source.BlobStor() - storagesCfg := blobStorCfg.Storages() - - ss := make([]subStorageCfg, 0, len(storagesCfg)) - for i := range storagesCfg { - var sCfg subStorageCfg - - sCfg.typ = storagesCfg[i].Type() - sCfg.path = storagesCfg[i].Path() - sCfg.perm = storagesCfg[i].Perm() - - switch storagesCfg[i].Type() { - case blobovniczatree.Type: - sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i])) - - sCfg.size = sub.Size() - sCfg.depth = sub.ShallowDepth() - sCfg.width = sub.ShallowWidth() - sCfg.openedCacheSize = sub.OpenedCacheSize() - sCfg.openedCacheTTL = sub.OpenedCacheTTL() - sCfg.openedCacheExpInterval = sub.OpenedCacheExpInterval() - sCfg.initWorkerCount = sub.InitWorkerCount() - sCfg.rebuildDropTimeout = sub.RebuildDropTimeout() - case fstree.Type: - sub := fstreeconfig.From((*config.Config)(storagesCfg[i])) - sCfg.depth = sub.Depth() - sCfg.noSync = sub.NoSync() - default: - return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type()) - } - - ss = append(ss, sCfg) - } - - target.subStorages = ss - return nil -} - -func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { - metabaseCfg := source.Metabase() - m := &target.metaCfg - - m.path = metabaseCfg.Path() - m.perm = metabaseCfg.BoltDB().Perm() - m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay() - m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() -} - -func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { - gcCfg := source.GC() - target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() - target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() - target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() - target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() -} - -func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { - limitsConfig := source.Limits().ToConfig() - limiter, err := qos.NewLimiter(limitsConfig) - if err != nil { - return err - } - target.limiter = limiter - return nil -} - -// internals contains application-specific internals that are created -// on application startup and are shared b/w the components during -// the application life cycle. -// It should not contain any read configuration values, component-specific -// helpers and fields. 
-type internals struct { - done chan struct{} - ctxCancel func() - internalErr chan error // channel for internal application errors at runtime - - appCfg *config.Config - - log *logger.Logger - - wg sync.WaitGroup - workers []worker - closers []closer - - apiVersion version.Version - healthStatus *atomic.Int32 - // is node under maintenance - isMaintenance atomic.Bool - audit *atomic.Bool - - sdNotify bool -} - -// starts node's maintenance. -func (c *cfg) startMaintenance(ctx context.Context) { - c.isMaintenance.Store(true) - c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance) -} - -// stops node's maintenance. -func (c *internals) stopMaintenance(ctx context.Context) { - if c.isMaintenance.CompareAndSwap(true, false) { - c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance) - } -} - -// IsMaintenance checks if storage node is under maintenance. -// -// Provides util.NodeState to Object service. -func (c *internals) IsMaintenance() bool { - return c.isMaintenance.Load() -} - -// shared contains component-specific structs/helpers that should -// be shared during initialization of the application. -type shared struct { - privateTokenStore sessionStorage - persistate *state.PersistentStorage - - clientCache *cache.ClientCache - bgClientCache *cache.ClientCache - putClientCache *cache.ClientCache - localAddr network.AddressGroup - - key *keys.PrivateKey - binPublicKey []byte - ownerIDFromKey user.ID // user ID calculated from key - - // current network map - netMap atomic.Value // type netmap.NetMap - netMapSource netmapCore.Source - - cnrClient *containerClient.Client - - frostfsidClient frostfsidcore.SubjectProvider - - respSvc *response.Service - - replicator *replicator.Replicator - - treeService *tree.Service - - metricsCollector *metrics.NodeMetrics - - metricsSvc *objectService.MetricCollector - - dialerSource *internalNet.DialerSource -} - -// dynamicConfiguration stores parameters of the -// components that supports runtime reconfigurations. -type dynamicConfiguration struct { - pprof *httpComponent - metrics *httpComponent -} - -type appConfigGuard struct { - mtx sync.RWMutex -} - -func (g *appConfigGuard) LockAppConfigShared() func() { - g.mtx.RLock() - return func() { g.mtx.RUnlock() } -} - -func (g *appConfigGuard) LockAppConfigExclusive() func() { - g.mtx.Lock() - return func() { g.mtx.Unlock() } -} - -type cfg struct { - applicationConfiguration - internals - shared - dynamicConfiguration - appConfigGuard - - // configuration of the internal - // services - cfgGRPC cfgGRPC - cfgMorph cfgMorph - cfgAccounting cfgAccounting - cfgContainer cfgContainer - cfgFrostfsID cfgFrostfsID - cfgNodeInfo cfgNodeInfo - cfgNetmap cfgNetmap - cfgControlService cfgControlService - cfgObject cfgObject - cfgQoSService cfgQoSService -} - -// ReadCurrentNetMap reads network map which has been cached at the -// latest epoch. Returns an error if value has not been cached yet. -// -// Provides interface for NetmapService server. 
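-//
-// A minimal call sketch (for illustration only):
-//
-//	var msg netmapV2.NetMap
-//	err := c.ReadCurrentNetMap(&msg) // errors until the first map is cached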
-func (c *cfg) ReadCurrentNetMap(msg *netmapV2.NetMap) error { - val := c.netMap.Load() - if val == nil { - return errors.New("missing local network map") - } - - val.(netmap.NetMap).WriteToV2(msg) - - return nil -} - -type grpcServer struct { - Listener net.Listener - Server *grpc.Server - Endpoint string -} - -type cfgGRPC struct { - // guard protects connections and handlers - guard sync.RWMutex - // servers must be protected with guard - servers []grpcServer - // handlers must be protected with guard - handlers []func(e string, l net.Listener, s *grpc.Server) - - maxChunkSize uint64 - maxAddrAmount uint64 - reconnectTimeout time.Duration - - limiter atomic.Pointer[limiting.SemaphoreLimiter] -} - -func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { - c.guard.Lock() - defer c.guard.Unlock() - - c.servers = append(c.servers, grpcServer{ - Listener: l, - Server: s, - Endpoint: e, - }) -} - -func (c *cfgGRPC) appendAndHandle(e string, l net.Listener, s *grpc.Server) { - c.guard.Lock() - defer c.guard.Unlock() - - c.servers = append(c.servers, grpcServer{ - Listener: l, - Server: s, - Endpoint: e, - }) - - for _, h := range c.handlers { - h(e, l, s) - } -} - -func (c *cfgGRPC) performAndSave(handler func(e string, l net.Listener, s *grpc.Server)) { - c.guard.Lock() - defer c.guard.Unlock() - - for _, conn := range c.servers { - handler(conn.Endpoint, conn.Listener, conn.Server) - } - - c.handlers = append(c.handlers, handler) -} - -func (c *cfgGRPC) dropConnection(endpoint string) { - c.guard.Lock() - defer c.guard.Unlock() - - pos := -1 - for idx, srv := range c.servers { - if srv.Endpoint == endpoint { - pos = idx - break - } - } - if pos < 0 { - return - } - - c.servers[pos].Server.Stop() // closes listener - c.servers = append(c.servers[0:pos], c.servers[pos+1:]...) -} - -type cfgMorph struct { - initialized bool - guard sync.Mutex - - client *client.Client - - // TTL of Sidechain cached values. Non-positive value disables caching. 
- cacheTTL time.Duration - - containerCacheSize uint32 - - proxyScriptHash neogoutil.Uint160 -} - -type cfgAccounting struct { - scriptHash neogoutil.Uint160 -} - -type cfgContainer struct { - scriptHash neogoutil.Uint160 - - parsers map[event.Type]event.NotificationParser - subscribers map[event.Type][]event.Handler - workerPool util.WorkerPool // pool for asynchronous handlers - containerBatchSize uint32 -} - -type cfgFrostfsID struct { - scriptHash neogoutil.Uint160 -} - -type cfgNetmap struct { - scriptHash neogoutil.Uint160 - wrapper *nmClient.Client - - parsers map[event.Type]event.NotificationParser - - subscribers map[event.Type][]event.Handler - workerPool util.WorkerPool // pool for asynchronous handlers - - state *networkState - - reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime -} - -type cfgNodeInfo struct { - // values from config - localInfo netmap.NodeInfo -} - -type cfgObject struct { - getSvc *getsvc.Service - - cnrSource container.Source - - cfgAccessPolicyEngine cfgAccessPolicyEngine - - pool cfgObjectRoutines - - cfgLocalStorage cfgLocalStorage - - tombstoneLifetime *atomic.Uint64 - - skipSessionTokenIssuerVerification bool -} - -type cfgLocalStorage struct { - localStorage *engine.StorageEngine -} - -type cfgAccessPolicyEngine struct { - policyContractHash neogoutil.Uint160 - - accessPolicyEngine *accessPolicyEngine -} - -type cfgObjectRoutines struct { - replication *ants.Pool -} - -type cfgControlService struct { - server *grpc.Server -} - -var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block") - -func initCfg(appCfg *config.Config) *cfg { - c := &cfg{ - applicationConfiguration: applicationConfiguration{ - cmode: &atomic.Bool{}, - }, - } - - err := c.readConfig(appCfg) - if err != nil { - panic(fmt.Errorf("config reading: %w", err)) - } - - key := nodeconfig.Key(appCfg) - - netState := newNetworkState() - - c.shared = initShared(appCfg, key, netState) - - netState.metrics = c.metricsCollector - - logPrm, err := c.loggerPrm() - fatalOnErr(err) - logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() - log, err := logger.NewLogger(logPrm) - fatalOnErr(err) - logger.UpdateLevelForTags(logPrm) - - c.internals = initInternals(appCfg, log) - - c.cfgAccounting = cfgAccounting{ - scriptHash: contractsconfig.Balance(appCfg), - } - c.cfgContainer = initContainer(appCfg) - - c.cfgFrostfsID = initFrostfsID(appCfg) - - c.cfgNetmap = initNetmap(appCfg, netState) - - c.cfgGRPC = initCfgGRPC() - - c.cfgMorph = cfgMorph{ - proxyScriptHash: contractsconfig.Proxy(appCfg), - } - c.cfgObject = initCfgObject(appCfg) - - user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey) - - c.onShutdown(c.clientCache.CloseAll) // clean up connections - c.onShutdown(c.bgClientCache.CloseAll) // clean up connections - c.onShutdown(c.putClientCache.CloseAll) // clean up connections - c.onShutdown(func() { _ = c.persistate.Close() }) - - return c -} - -func initInternals(appCfg *config.Config, log *logger.Logger) internals { - var healthStatus atomic.Int32 - healthStatus.Store(int32(control.HealthStatus_HEALTH_STATUS_UNDEFINED)) - - var auditRequests atomic.Bool - auditRequests.Store(audit.Enabled(appCfg)) - - return internals{ - done: make(chan struct{}), - appCfg: appCfg, - internalErr: make(chan error), - log: log, - apiVersion: version.Current(), - healthStatus: &healthStatus, - sdNotify: initSdNotify(appCfg), - audit: &auditRequests, - } -} - -func initSdNotify(appCfg *config.Config) bool { - if 
config.BoolSafe(appCfg.Sub("systemdnotify"), "enabled") { - fatalOnErr(sdnotify.InitSocket()) - return true - } - return false -} - -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared { - netAddr := nodeconfig.BootstrapAddresses(appCfg) - - persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) - fatalOnErr(err) - - nodeMetrics := metrics.NewNodeMetrics() - - ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics())) - fatalOnErr(err) - - cacheOpts := cache.ClientCacheOpts{ - DialTimeout: apiclientconfig.DialTimeout(appCfg), - StreamTimeout: apiclientconfig.StreamTimeout(appCfg), - Key: &key.PrivateKey, - AllowExternal: apiclientconfig.AllowExternal(appCfg), - ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), - DialerSource: ds, - } - - return shared{ - key: key, - binPublicKey: key.PublicKey().Bytes(), - localAddr: netAddr, - respSvc: response.NewService(netState), - clientCache: cache.NewSDKClientCache(cacheOpts), - bgClientCache: cache.NewSDKClientCache(cacheOpts), - putClientCache: cache.NewSDKClientCache(cacheOpts), - persistate: persistate, - metricsCollector: nodeMetrics, - dialerSource: ds, - } -} - -func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config { - result := internalNet.Config{ - Enabled: multinet.Enabled(appCfg), - Balancer: multinet.Balancer(appCfg), - Restrict: multinet.Restrict(appCfg), - FallbackDelay: multinet.FallbackDelay(appCfg), - Metrics: m, - } - sn := multinet.Subnets(appCfg) - for _, s := range sn { - result.Subnets = append(result.Subnets, internalNet.Subnet{ - Prefix: s.Mask, - SourceIPs: s.SourceIPs, - }) - } - return result -} - -func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { - netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) - fatalOnErr(err) - - return cfgNetmap{ - scriptHash: contractsconfig.Netmap(appCfg), - state: netState, - workerPool: netmapWorkerPool, - reBoostrapTurnedOff: &atomic.Bool{}, - } -} - -func initContainer(appCfg *config.Config) cfgContainer { - containerWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) - fatalOnErr(err) - - return cfgContainer{ - scriptHash: contractsconfig.Container(appCfg), - workerPool: containerWorkerPool, - } -} - -func initFrostfsID(appCfg *config.Config) cfgFrostfsID { - return cfgFrostfsID{ - scriptHash: contractsconfig.FrostfsID(appCfg), - } -} - -func initCfgGRPC() (cfg cfgGRPC) { - maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload - maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - - cfg.maxChunkSize = maxChunkSize - cfg.maxAddrAmount = maxAddrAmount - - return -} - -func initCfgObject(appCfg *config.Config) cfgObject { - var tsLifetime atomic.Uint64 - tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg)) - return cfgObject{ - pool: initObjectPool(appCfg), - tombstoneLifetime: &tsLifetime, - skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(), - } -} - -func (c *cfg) engineOpts() []engine.Option { - var opts []engine.Option - - opts = append(opts, - engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log.WithTag(logger.TagEngine)), - engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), - ) - - if c.metricsCollector != nil { - opts = append(opts, engine.WithMetrics(c.metricsCollector.Engine())) - } - - return opts -} - -type shardOptsWithID struct { - configID string 
- shOpts []shard.Option -} - -func (c *cfg) shardOpts(ctx context.Context) []shardOptsWithID { - shards := make([]shardOptsWithID, 0, len(c.EngineCfg.shards)) - - for _, shCfg := range c.EngineCfg.shards { - shards = append(shards, c.getShardOpts(ctx, shCfg)) - } - - return shards -} - -func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { - var writeCacheOpts []writecache.Option - if wcRead := shCfg.writecacheCfg; wcRead.enabled { - writeCacheOpts = append(writeCacheOpts, - writecache.WithPath(wcRead.path), - writecache.WithFlushSizeLimit(wcRead.flushSizeLimit), - writecache.WithMaxObjectSize(wcRead.maxObjSize), - writecache.WithFlushWorkersCount(wcRead.flushWorkerCount), - writecache.WithMaxCacheSize(wcRead.sizeLimit), - writecache.WithMaxCacheCount(wcRead.countLimit), - writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), - ) - } - return writeCacheOpts -} - -func (c *cfg) getPiloramaOpts(shCfg shardCfg) []pilorama.Option { - var piloramaOpts []pilorama.Option - if prRead := shCfg.piloramaCfg; prRead.enabled { - piloramaOpts = append(piloramaOpts, - pilorama.WithPath(prRead.path), - pilorama.WithPerm(prRead.perm), - pilorama.WithNoSync(prRead.noSync), - pilorama.WithMaxBatchSize(prRead.maxBatchSize), - pilorama.WithMaxBatchDelay(prRead.maxBatchDelay), - ) - if c.metricsCollector != nil { - piloramaOpts = append(piloramaOpts, pilorama.WithMetrics(lsmetrics.NewPiloramaMetrics(c.metricsCollector.PiloramaMetrics()))) - } - } - return piloramaOpts -} - -func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.SubStorage { - var ss []blobstor.SubStorage - for _, sRead := range shCfg.subStorages { - switch sRead.typ { - case blobovniczatree.Type: - blobTreeOpts := []blobovniczatree.Option{ - blobovniczatree.WithRootPath(sRead.path), - blobovniczatree.WithPermissions(sRead.perm), - blobovniczatree.WithBlobovniczaSize(sRead.size), - blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth), - blobovniczatree.WithBlobovniczaShallowWidth(sRead.width), - blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize), - blobovniczatree.WithOpenedCacheTTL(sRead.openedCacheTTL), - blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), - blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), - blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), - blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), - blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), - } - - if c.metricsCollector != nil { - blobTreeOpts = append(blobTreeOpts, - blobovniczatree.WithMetrics( - lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobobvnizcaTreeMetrics()), - ), - ) - } - ss = append(ss, blobstor.SubStorage{ - Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < shCfg.smallSizeObjectLimit - }, - }) - case fstree.Type: - fstreeOpts := []fstree.Option{ - fstree.WithPath(sRead.path), - fstree.WithPerm(sRead.perm), - fstree.WithDepth(sRead.depth), - fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), - } - if c.metricsCollector != nil { - fstreeOpts = append(fstreeOpts, - fstree.WithMetrics( - lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()), - ), - ) - } - - ss = append(ss, blobstor.SubStorage{ - Storage: 
fstree.New(fstreeOpts...),
-				Policy: func(_ *objectSDK.Object, _ []byte) bool {
-					return true
-				},
-			})
-		default:
-			// should never happen: unknown storage types are
-			// rejected when the config is read
-		}
-	}
-	return ss
-}
-
-func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID {
-	writeCacheOpts := c.getWriteCacheOpts(shCfg)
-	piloramaOpts := c.getPiloramaOpts(shCfg)
-	ss := c.getSubstorageOpts(ctx, shCfg)
-
-	blobstoreOpts := []blobstor.Option{
-		blobstor.WithCompression(shCfg.compression),
-		blobstor.WithStorages(ss),
-		blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)),
-	}
-	if c.metricsCollector != nil {
-		blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
-	}
-
-	mbOptions := []meta.Option{
-		meta.WithPath(shCfg.metaCfg.path),
-		meta.WithPermissions(shCfg.metaCfg.perm),
-		meta.WithMaxBatchSize(shCfg.metaCfg.maxBatchSize),
-		meta.WithMaxBatchDelay(shCfg.metaCfg.maxBatchDelay),
-		meta.WithBoltDBOptions(&bbolt.Options{
-			Timeout: 100 * time.Millisecond,
-		}),
-		meta.WithLogger(c.log),
-		meta.WithEpochState(c.cfgNetmap.state),
-	}
-	if c.metricsCollector != nil {
-		mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
-		shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
-	}
-
-	var sh shardOptsWithID
-	sh.configID = shCfg.id()
-	sh.shOpts = []shard.Option{
-		shard.WithLogger(c.log.WithTag(logger.TagShard)),
-		shard.WithRefillMetabase(shCfg.refillMetabase),
-		shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
-		shard.WithMode(shCfg.mode),
-		shard.WithBlobStorOptions(blobstoreOpts...),
-		shard.WithMetaBaseOptions(mbOptions...),
-		shard.WithPiloramaOptions(piloramaOpts...),
-		shard.WithWriteCache(shCfg.writecacheCfg.enabled),
-		shard.WithWriteCacheOptions(writeCacheOpts),
-		shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
-		shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
-		shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
-		shard.WithExpiredCollectorWorkerCount(shCfg.gcCfg.expiredCollectorWorkerCount),
-		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			fatalOnErr(err)
-
-			return pool
-		}),
-		shard.WithLimiter(shCfg.limiter),
-	}
-	return sh
-}
-
-func (c *cfg) loggerPrm() (logger.Prm, error) {
-	var prm logger.Prm
-	// (re)init read configuration
-	err := prm.SetLevelString(c.LoggerCfg.level)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
-	}
-	err = prm.SetDestination(c.LoggerCfg.destination)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
-	}
-	prm.PrependTimestamp = c.LoggerCfg.timestamp
-	prm.Options = c.LoggerCfg.options
-	err = prm.SetTags(c.LoggerCfg.tags)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect allowed tags format")
-	}
-
-	return prm, nil
-}
-
-func (c *cfg) LocalAddress() network.AddressGroup {
-	return c.localAddr
-}
-
-func initLocalStorage(ctx context.Context, c *cfg) {
-	ls := engine.New(c.engineOpts()...)
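-	// ls is not usable until the shards are attached below; the new-epoch
-	// handler registered next forwards epoch ticks to ls.HandleNewEpoch.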
- - addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { - ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber()) - }) - - // allocate memory for the service; - // service will be created later - c.cfgObject.getSvc = new(getsvc.Service) - - var shardsAttached int - for _, optsWithMeta := range c.shardOpts(ctx) { - id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, - shard.WithTombstoneSource(c.createTombstoneSource()), - shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) - } else { - shardsAttached++ - c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) - } - } - if shardsAttached == 0 { - fatalOnErr(engineconfig.ErrNoShardConfigured) - } - - c.cfgObject.cfgLocalStorage.localStorage = ls - - c.onShutdown(func() { - c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine) - - err := ls.Close(context.WithoutCancel(ctx)) - if err != nil { - c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, - zap.Error(err), - ) - } else { - c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) - } - }) -} - -func initAccessPolicyEngine(ctx context.Context, c *cfg) { - var localOverrideDB chainbase.LocalOverrideDatabase - if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" { - c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) - localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() - } else { - localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( - chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()), - chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()), - chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()), - ) - } - - var morphRuleStorage policy_engine.MorphRuleChainStorageReader - morphRuleStorage = policy_client.NewContractStorage( - client.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.cfgObject.cfgAccessPolicyEngine.policyContractHash) - - cacheSize := morphconfig.APEChainCacheSize(c.appCfg) - if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { - morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) - } - - ape := newAccessPolicyEngine(morphRuleStorage, localOverrideDB) - c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine = ape - - c.onShutdown(func() { - if err := ape.LocalOverrideDatabaseCore().Close(); err != nil { - c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure, - zap.Error(err), - ) - } - }) -} - -func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { - var err error - - replicatorPoolSize := replicatorconfig.PoolSize(cfg) - pool.replication, err = ants.NewPool(replicatorPoolSize) - fatalOnErr(err) - - return pool -} - -func (c *cfg) LocalNodeInfo() *netmap.NodeInfo { - var res netmap.NodeInfo - ni, ok := c.cfgNetmap.state.getNodeInfo() - if ok { - res = ni - } else { - res = c.cfgNodeInfo.localInfo - } - return &res -} - -// setContractNodeInfo rewrites local node info from the FrostFS network map. -// Called with nil when storage node is outside the FrostFS network map -// (before entering the network and after leaving it). 
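-//
-// A nil value is treated as "not in the map", so LocalNodeInfo (above)
-// falls back to the statically configured c.cfgNodeInfo.localInfo.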
-func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
-	c.cfgNetmap.state.setNodeInfo(ni)
-}
-
-func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
-	ni, err := c.netmapLocalNodeState(ctx, epoch)
-	if err != nil {
-		c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
-			zap.Uint64("epoch", epoch),
-			zap.Error(err))
-		return
-	}
-
-	c.setContractNodeInfo(ni)
-}
-
-// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
-// with the binary-encoded information from the current node's configuration.
-// The provided state is set in the node info before the call.
-func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
-	ni := c.cfgNodeInfo.localInfo
-	ni.SetStatus(state)
-
-	prm := nmClient.AddPeerPrm{}
-	prm.SetNodeInfo(ni)
-
-	return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
-}
-
-// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
-func bootstrapOnline(ctx context.Context, c *cfg) error {
-	return c.bootstrapWithState(ctx, netmap.Online)
-}
-
-// bootstrap calls bootstrapWithState with:
-// - "maintenance" state if maintenance is in progress on the current node
-// - "online", otherwise
-func (c *cfg) bootstrap(ctx context.Context) error {
-	// switch to online except when under maintenance
-	st := c.cfgNetmap.state.controlNetmapStatus()
-	if st == control.NetmapStatus_MAINTENANCE {
-		c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
-		return c.bootstrapWithState(ctx, netmap.Maintenance)
-	}
-
-	c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
-		zap.Stringer("previous", st),
-	)
-
-	return bootstrapOnline(ctx, c)
-}
-
-type dCmp struct {
-	name       string
-	reloadFunc func() error
-}
-
-func (c *cfg) signalWatcher(ctx context.Context) {
-	ch := make(chan os.Signal, 1)
-	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
-
-	sighupCh := make(chan os.Signal, 1)
-	signal.Notify(sighupCh, syscall.SIGHUP)
-
-	for {
-		select {
-		// signals causing application to shut down should have priority over
-		// reconfiguration signal
-		case <-ch:
-			c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
-
-			c.shutdown(ctx)
-
-			c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
-			return
-		case err := <-c.internalErr: // internal application error
-			c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
-				zap.String("message", err.Error()))
-
-			c.shutdown(ctx)
-
-			c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
-			return
-		default:
-			// block until any signal is received
-			select {
-			case <-sighupCh:
-				c.reloadConfig(ctx)
-			case <-ch:
-				c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
-
-				c.shutdown(ctx)
-
-				c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
-				return
-			case err := <-c.internalErr: // internal application error
-				c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
-					zap.String("message", err.Error()))
-
-				c.shutdown(ctx)
-
-				c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
-				return
-			}
-		}
-	}
-}
-
-func (c *cfg) reloadConfig(ctx context.Context) {
-	c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
-
-	if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-		c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
-		return
-	}
-	defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
-
-	err := c.reloadAppConfig()
-
if err != nil {
-		c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
-		return
-	}
-
-	// All the components are expected to support
-	// the logger's dynamic reconfiguration approach.
-
-	components := c.getComponents(ctx)
-
-	// Object
-	c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
-
-	// Storage Engine
-
-	var rcfg engine.ReConfiguration
-	for _, optsWithID := range c.shardOpts(ctx) {
-		rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
-			shard.WithTombstoneSource(c.createTombstoneSource()),
-			shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
-		))
-	}
-
-	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
-	if err != nil {
-		c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
-		return
-	}
-
-	for _, component := range components {
-		err = component.reloadFunc()
-		if err != nil {
-			c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
-				zap.String("component", component.name),
-				zap.Error(err))
-		}
-	}
-
-	if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
-		c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
-		return
-	}
-
-	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
-}
-
-func (c *cfg) getComponents(ctx context.Context) []dCmp {
-	var components []dCmp
-
-	components = append(components, dCmp{"logger", func() error {
-		prm, err := c.loggerPrm()
-		if err != nil {
-			return err
-		}
-		logger.UpdateLevelForTags(prm)
-		return nil
-	}})
-	components = append(components, dCmp{"runtime", func() error {
-		setRuntimeParameters(ctx, c)
-		return nil
-	}})
-	components = append(components, dCmp{"audit", func() error {
-		c.audit.Store(audit.Enabled(c.appCfg))
-		return nil
-	}})
-	components = append(components, dCmp{"pools", c.reloadPools})
-	components = append(components, dCmp{"tracing", func() error {
-		traceConfig, err := tracingconfig.ToTracingConfig(c.appCfg)
-		if err != nil {
-			return err
-		}
-		updated, err := tracing.Setup(ctx, *traceConfig)
-		if updated {
-			c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
-		}
-		return err
-	}})
-	if c.treeService != nil {
-		components = append(components, dCmp{"tree", func() error {
-			c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys())
-			return nil
-		}})
-	}
-	if cmp, updated := metricsComponent(c); updated {
-		if cmp.enabled {
-			cmp.preReload = enableMetricsSvc
-		} else {
-			cmp.preReload = disableMetricsSvc
-		}
-		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
-	}
-	if cmp, updated := pprofComponent(c); updated {
-		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
-	}
-
-	components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
-
-	return components
-}
-
-func (c *cfg) reloadPools() error {
-	newSize := replicatorconfig.PoolSize(c.appCfg)
-	c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
-
-	return nil
-}
-
-func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
-	oldSize := p.Cap()
-	if oldSize != newSize {
-		c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
-			zap.Int("old", oldSize), zap.Int("new", newSize))
-		p.Tune(newSize)
-	}
-}
-
-func (c *cfg) reloadAppConfig() error {
-	unlock := c.LockAppConfigExclusive()
-	defer unlock()
-
-	return c.readConfig(c.appCfg)
-}
-
-func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
-	var tssPrm tsourse.TombstoneSourcePrm
-	tssPrm.SetGetService(c.cfgObject.getSvc)
-	tombstoneSrc := tsourse.NewSource(tssPrm)
-
-	tombstoneSource := tombstone.NewChecker(
-		tombstone.WithLogger(c.log),
-		tombstone.WithTombstoneSource(tombstoneSrc),
-	)
-	return tombstoneSource
-}
-
-func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
-	return container.NewInfoProvider(func() (container.Source, error) {
-		c.initMorphComponents(ctx)
-		cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
-		if err != nil {
-			return nil, err
-		}
-		return containerClient.AsContainerSource(cc), nil
-	})
-}
-
-func (c *cfg) shutdown(ctx context.Context) {
-	old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
-	if old == control.HealthStatus_SHUTTING_DOWN {
-		c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
-		return
-	}
-	if old == control.HealthStatus_STARTING {
-		c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
-	}
-
-	c.ctxCancel()
-	close(c.done)
-	for i := range c.closers {
-		c.closers[len(c.closers)-1-i].fn()
-	}
-
-	if err := sdnotify.ClearStatus(); err != nil {
-		c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
-	}
-}
diff --git a/cmd/frostfs-node/config/apiclient/config.go b/cmd/frostfs-node/config/apiclient/config.go
deleted file mode 100644
index 7d82a4a18..000000000
--- a/cmd/frostfs-node/config/apiclient/config.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package apiclientconfig
-
-import (
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
-	subsection = "apiclient"
-
-	// DialTimeoutDefault is a default dial timeout of FrostFS API client connection.
-	DialTimeoutDefault = 5 * time.Second
-
-	// StreamTimeoutDefault is a default timeout of FrostFS API streaming operation.
-	StreamTimeoutDefault = 15 * time.Second
-)
-
-// DialTimeout returns the value of "dial_timeout" config parameter
-// from "apiclient" section.
-//
-// Returns DialTimeoutDefault if the value is not a positive duration.
-func DialTimeout(c *config.Config) time.Duration {
-	v := config.DurationSafe(c.Sub(subsection), "dial_timeout")
-	if v > 0 {
-		return v
-	}
-
-	return DialTimeoutDefault
-}
-
-// StreamTimeout returns the value of "stream_timeout" config parameter
-// from "apiclient" section.
-//
-// Returns StreamTimeoutDefault if the value is not a positive duration.
-func StreamTimeout(c *config.Config) time.Duration {
-	v := config.DurationSafe(c.Sub(subsection), "stream_timeout")
-	if v > 0 {
-		return v
-	}
-
-	return StreamTimeoutDefault
-}
-
-// ReconnectTimeout returns the value of "reconnect_timeout" config parameter
-// from "apiclient" section.
-//
-// Returns 0 if the value is not a positive duration.
-func ReconnectTimeout(c *config.Config) time.Duration {
-	v := config.DurationSafe(c.Sub(subsection), "reconnect_timeout")
-	if v > 0 {
-		return v
-	}
-
-	return 0
-}
-
-// AllowExternal returns the value of "allow_external" config parameter
-// from "apiclient" section.
-//
-// Returns false if the value is missing or invalid.
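// A minimal usage sketch, not from the deleted sources: reading the "apiclient"
// timeouts with the getters above; `appCfg` is an assumed *config.Config.
//
//	dial := apiclientconfig.DialTimeout(appCfg)           // 5s unless "apiclient.dial_timeout" is set
//	stream := apiclientconfig.StreamTimeout(appCfg)       // 15s unless "apiclient.stream_timeout" is set
//	reconnect := apiclientconfig.ReconnectTimeout(appCfg) // 0 when unset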
-func AllowExternal(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "allow_external") -} diff --git a/cmd/frostfs-node/config/apiclient/config_test.go b/cmd/frostfs-node/config/apiclient/config_test.go deleted file mode 100644 index cdfa5c401..000000000 --- a/cmd/frostfs-node/config/apiclient/config_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package apiclientconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestApiclientSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Equal(t, apiclientconfig.DialTimeoutDefault, apiclientconfig.DialTimeout(empty)) - require.Equal(t, apiclientconfig.StreamTimeoutDefault, apiclientconfig.StreamTimeout(empty)) - require.Equal(t, time.Duration(0), apiclientconfig.ReconnectTimeout(empty)) - require.False(t, apiclientconfig.AllowExternal(empty)) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, 15*time.Second, apiclientconfig.DialTimeout(c)) - require.Equal(t, 20*time.Second, apiclientconfig.StreamTimeout(c)) - require.Equal(t, 30*time.Second, apiclientconfig.ReconnectTimeout(c)) - require.True(t, apiclientconfig.AllowExternal(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/audit/config.go b/cmd/frostfs-node/config/audit/config.go deleted file mode 100644 index 8f728c850..000000000 --- a/cmd/frostfs-node/config/audit/config.go +++ /dev/null @@ -1,12 +0,0 @@ -package audit - -import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - -const ( - subsection = "audit" -) - -// Enabled returns the value of "enabled" config parameter from "audit" section. 
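// Illustrative sketch, not from the original sources and assuming the usual
// "_" ENV separator: the flag read by Enabled below can come from a config
// file or from the environment.
//
//	audit:
//	  enabled: true
//
//	FROSTFS_AUDIT_ENABLED=true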
-func Enabled(c *config.Config) bool {
-	return config.BoolSafe(c.Sub(subsection), "enabled")
-}
diff --git a/cmd/frostfs-node/config/audit/config_test.go b/cmd/frostfs-node/config/audit/config_test.go
deleted file mode 100644
index 7731cc8e6..000000000
--- a/cmd/frostfs-node/config/audit/config_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package audit
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestAuditSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-		require.Equal(t, false, Enabled(empty))
-	})
-
-	const path = "../../../../config/example/node"
-
-	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, true, Enabled(c))
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go
deleted file mode 100644
index c40bf3620..000000000
--- a/cmd/frostfs-node/config/calls.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package config
-
-import (
-	"slices"
-	"strings"
-
-	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
-)
-
-// Sub returns a subsection of the Config by name.
-//
-// Returns nil if subsection is missing.
-func (x *Config) Sub(name string) *Config {
-	// copy the path to prevent subsequent appends from modifying the parent
-	ln := len(x.path)
-
-	path := make([]string, ln, ln+1)
-
-	copy(path, x.path)
-
-	var defaultPath []string
-	if x.defaultPath != nil {
-		ln := len(x.defaultPath)
-		defaultPath = make([]string, ln, ln+1)
-		copy(defaultPath, x.defaultPath)
-	}
-
-	return &Config{
-		v:           x.v,
-		path:        append(path, name),
-		defaultPath: append(defaultPath, name),
-	}
-}
-
-// Value returns the configuration value by name.
-//
-// The result can be cast to a particular type
-// via the corresponding function (e.g. StringSlice).
-// Note: casting via the Go `.()` operator is not
-// recommended.
-//
-// Returns nil if config is nil.
-func (x *Config) Value(name string) any {
-	value := x.v.Get(strings.Join(append(x.path, name), configViper.Separator))
-	if value != nil || x.defaultPath == nil {
-		return value
-	}
-	return x.v.Get(strings.Join(append(x.defaultPath, name), configViper.Separator))
-}
-
-// SetDefault sets fallback config for missing values.
-//
-// It supports only one level of nesting and is intended to be used
-// to provide default values.
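// Usage sketch mirroring TestConfig_SetDefault in calls_test.go below:
//
//	s := c.Sub("custom")
//	s.SetDefault(c.Sub("default"))
//	config.Int(s, "missing") // falls back to the "default" subsection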
-func (x *Config) SetDefault(from *Config) {
-	x.defaultPath = slices.Clone(from.path)
-}
diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go
deleted file mode 100644
index bc149eb7d..000000000
--- a/cmd/frostfs-node/config/calls_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package config_test
-
-import (
-	"strings"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
-	"github.com/stretchr/testify/require"
-)
-
-func TestConfigCommon(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		val := c.Value("value")
-		require.NotNil(t, val)
-
-		val = c.Value("non-existent value")
-		require.Nil(t, val)
-
-		sub := c.Sub("section")
-		require.NotNil(t, sub)
-
-		const nonExistentSub = "non-existent sub-section"
-
-		val = c.Sub(nonExistentSub).Value("value")
-		require.Nil(t, val)
-	})
-}
-
-func TestConfigEnv(t *testing.T) {
-	const (
-		name    = "name"
-		section = "section"
-		value   = "some value"
-	)
-
-	envName := strings.ToUpper(
-		strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
-	t.Setenv(envName, value)
-
-	c := configtest.EmptyConfig()
-
-	require.Equal(t, value, c.Sub(section).Value(name))
-}
-
-func TestConfig_SubValue(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.
-			Sub("section").
-			Sub("sub").
-			Sub("sub")
-
-		// get subsection 1
-		sub := c.Sub("sub1")
-
-		// get subsection 2
-		c.Sub("sub2")
-
-		// sub should not be corrupted
-		require.Equal(t, "val1", sub.Value("key"))
-	})
-}
-
-func TestConfig_SetDefault(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("with_default")
-		s := c.Sub("custom")
-		s.SetDefault(c.Sub("default"))
-
-		require.Equal(t, int64(42), config.Int(s, "missing"))
-		require.Equal(t, "b", config.String(s, "overridden"))
-		require.Equal(t, false, config.Bool(s, "overridden_with_default"))
-
-		// Default can be set only once.
-		s = s.Sub("sub")
-		require.Equal(t, int64(123), config.Int(s, "missing"))
-		require.Equal(t, "y", config.String(s, "overridden"))
-	})
-}
diff --git a/cmd/frostfs-node/config/cast.go b/cmd/frostfs-node/config/cast.go
deleted file mode 100644
index beec09a9e..000000000
--- a/cmd/frostfs-node/config/cast.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package config
-
-import (
-	"math/bits"
-	"strings"
-	"time"
-	"unicode"
-
-	"github.com/spf13/cast"
-)
-
-func panicOnErr(err error) {
-	if err != nil {
-		panic(err)
-	}
-}
-
-// StringSlice reads a configuration value
-// from c by name and casts it to []string.
-//
-// Panics if the value cannot be cast.
-func StringSlice(c *Config, name string) []string {
-	x, err := cast.ToStringSliceE(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// StringSliceSafe reads a configuration value
-// from c by name and casts it to []string.
-//
-// Returns nil if the value cannot be cast.
-func StringSliceSafe(c *Config, name string) []string {
-	return cast.ToStringSlice(c.Value(name))
-}
-
-// String reads a configuration value
-// from c by name and casts it to string.
-//
-// Panics if the value cannot be cast.
-func String(c *Config, name string) string {
-	x, err := cast.ToStringE(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// StringSafe reads a configuration value
-// from c by name and casts it to string.
-//
-// Returns "" if the value cannot be cast.
-func StringSafe(c *Config, name string) string {
-	return cast.ToString(c.Value(name))
-}
-
-// Duration reads a configuration value
-// from c by name and casts it to time.Duration.
-//
-// Panics if the value cannot be cast.
-func Duration(c *Config, name string) time.Duration {
-	x, err := cast.ToDurationE(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// DurationSafe reads a configuration value
-// from c by name and casts it to time.Duration.
-//
-// Returns 0 if the value cannot be cast.
-func DurationSafe(c *Config, name string) time.Duration {
-	return cast.ToDuration(c.Value(name))
-}
-
-// Bool reads a configuration value
-// from c by name and casts it to bool.
-//
-// Panics if the value cannot be cast.
-func Bool(c *Config, name string) bool {
-	x, err := cast.ToBoolE(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// BoolSafe reads a configuration value
-// from c by name and casts it to bool.
-//
-// Returns false if the value cannot be cast.
-func BoolSafe(c *Config, name string) bool {
-	return cast.ToBool(c.Value(name))
-}
-
-// Uint32 reads a configuration value
-// from c by name and casts it to uint32.
-//
-// Panics if the value cannot be cast.
-func Uint32(c *Config, name string) uint32 {
-	x, err := cast.ToUint32E(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// Uint32Safe reads a configuration value
-// from c by name and casts it to uint32.
-//
-// Returns 0 if the value cannot be cast.
-func Uint32Safe(c *Config, name string) uint32 {
-	return cast.ToUint32(c.Value(name))
-}
-
-// Uint reads a configuration value
-// from c by name and casts it to uint64.
-//
-// Panics if the value cannot be cast.
-func Uint(c *Config, name string) uint64 {
-	x, err := cast.ToUint64E(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// UintSafe reads a configuration value
-// from c by name and casts it to uint64.
-//
-// Returns 0 if the value cannot be cast.
-func UintSafe(c *Config, name string) uint64 {
-	return cast.ToUint64(c.Value(name))
-}
-
-// Int reads a configuration value
-// from c by name and casts it to int64.
-//
-// Panics if the value cannot be cast.
-func Int(c *Config, name string) int64 {
-	x, err := cast.ToInt64E(c.Value(name))
-	panicOnErr(err)
-
-	return x
-}
-
-// IntSafe reads a configuration value
-// from c by name and casts it to int64.
-//
-// Returns 0 if the value cannot be cast.
-func IntSafe(c *Config, name string) int64 {
-	return cast.ToInt64(c.Value(name))
-}
-
-// SizeInBytesSafe reads a configuration value
-// from c by name and casts it to a size in bytes (uint64).
-//
-// The suffix can be a single letter (b, k, m, g, t), optionally with
-// an additional b at the end. Spaces between the number and the suffix
-// are allowed. All multipliers are powers of 2 (i.e. k is for kibi-byte).
-//
-// Returns 0 if the value cannot be cast.
-func SizeInBytesSafe(c *Config, name string) uint64 {
-	s := StringSafe(c, name)
-	return parseSizeInBytes(s)
-}
-
-// The following code is taken from https://github.com/spf13/viper/blob/master/util.go
-// with minor corrections (allowing both the `k` and `kb` forms).
-// Viper seems able to convert sizes, but the corresponding parser in the `cast` package
-// is missing.
-
-// safeMul returns size*multiplier, rounding the fractional part
-// down to a resolution of multiplier/1024 bytes.
-// Returns 0 if overflow is detected.
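// Behavior sketch for the parser below, following the binary multipliers
// documented for SizeInBytesSafe (expected results, not asserted here):
//
//	parseSizeInBytes("1b")    // 1
//	parseSizeInBytes("1 kb")  // 1024
//	parseSizeInBytes("12 mb") // 12 * 1 << 20
//	parseSizeInBytes("0.5tb") // (1 << 40) / 2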
-func safeMul(size float64, multiplier uint64) uint64 {
-	n := uint64(size)
-	f := uint64((size - float64(n)) * 1024)
-	if f != 0 && multiplier != 1 {
-		s := n<<10 + f
-		if s < n {
-			return 0
-		}
-
-		n = s
-		multiplier >>= 10
-	}
-
-	hi, lo := bits.Mul64(n, multiplier)
-	if hi != 0 {
-		return 0
-	}
-	return lo
-}
-
-// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes.
-func parseSizeInBytes(sizeStr string) uint64 {
-	sizeStr = strings.TrimSpace(sizeStr)
-	lastChar := len(sizeStr) - 1
-	multiplier := uint64(1)
-
-	if lastChar > 0 {
-		if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
-			lastChar--
-		}
-		if lastChar >= 0 {
-			switch unicode.ToLower(rune(sizeStr[lastChar])) {
-			case 'k':
-				multiplier = 1 << 10
-				sizeStr = strings.TrimSpace(sizeStr[:lastChar])
-			case 'm':
-				multiplier = 1 << 20
-				sizeStr = strings.TrimSpace(sizeStr[:lastChar])
-			case 'g':
-				multiplier = 1 << 30
-				sizeStr = strings.TrimSpace(sizeStr[:lastChar])
-			case 't':
-				multiplier = 1 << 40
-				sizeStr = strings.TrimSpace(sizeStr[:lastChar])
-			default:
-				multiplier = 1
-				sizeStr = strings.TrimSpace(sizeStr[:lastChar+1])
-			}
-		}
-	}
-
-	size := cast.ToFloat64(sizeStr)
-	return safeMul(size, multiplier)
-}
-
-// FloatOrDefault reads a configuration value
-// from c by name and casts it to float64.
-//
-// Returns defaultValue if the value cannot be cast.
-func FloatOrDefault(c *Config, name string, defaultValue float64) float64 {
-	v, err := cast.ToFloat64E(c.Value(name))
-	if err != nil {
-		return defaultValue
-	}
-	return v
-}
diff --git a/cmd/frostfs-node/config/cast_test.go b/cmd/frostfs-node/config/cast_test.go
deleted file mode 100644
index f8c1ee28e..000000000
--- a/cmd/frostfs-node/config/cast_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package config_test
-
-import (
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestStringSlice(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		cStringSlice := c.Sub("string_slice")
-
-		val := config.StringSlice(cStringSlice, "empty")
-		require.Empty(t, val)
-
-		val = config.StringSlice(cStringSlice, "filled")
-		require.Equal(t, []string{
-			"string1",
-			"string2",
-		}, val)
-
-		require.Panics(t, func() {
-			config.StringSlice(cStringSlice, "incorrect")
-		})
-
-		val = config.StringSliceSafe(cStringSlice, "incorrect")
-		require.Nil(t, val)
-	})
-}
-
-func TestString(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("string")
-
-		val := config.String(c, "correct")
-		require.Equal(t, "some string", val)
-
-		require.Panics(t, func() {
-			config.String(c, "incorrect")
-		})
-
-		val = config.StringSafe(c, "incorrect")
-		require.Empty(t, val)
-	})
-}
-
-func TestDuration(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("duration")
-
-		val := config.Duration(c, "correct")
-		require.Equal(t, 15*time.Minute, val)
-
-		require.Panics(t, func() {
-			config.Duration(c, "incorrect")
-		})
-
-		val = config.DurationSafe(c, "incorrect")
-		require.Equal(t, time.Duration(0), val)
-	})
-}
-
-func TestBool(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("bool")
-
-		val := config.Bool(c, "correct")
-		require.Equal(t, true, val)
-
-		val = config.Bool(c, "correct_string")
-		require.Equal(t, true, val)
-
-		require.Panics(t, func() {
-			config.Bool(c, "incorrect")
-		})
-
-		val = config.BoolSafe(c, "incorrect")
-		require.Equal(t, false, val)
-	})
-}
-
-func TestNumbers(t *testing.T) {
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("number")
-
-		const (
-			intPos = "int_pos"
-			intNeg = "int_neg"
-
-			fractPos = "fract_pos"
-			fractNeg = "fract_neg"
-
-			incorrect = "incorrect"
-		)
-
-		require.EqualValues(t, 1, config.Int(c, intPos))
-		require.EqualValues(t, 1, config.Uint(c, intPos))
-
-		require.EqualValues(t, -1, config.Int(c, intNeg))
-		require.Panics(t, func() { config.Uint(c, intNeg) })
-
-		require.EqualValues(t, 2, config.Int(c, fractPos))
-		require.EqualValues(t, 2, config.Uint(c, fractPos))
-
-		require.EqualValues(t, -2, config.Int(c, fractNeg))
-		require.Panics(t, func() { config.Uint(c, fractNeg) })
-
-		require.Panics(t, func() { config.Int(c, incorrect) })
-		require.Panics(t, func() { config.Uint(c, incorrect) })
-
-		require.Zero(t, config.IntSafe(c, incorrect))
-		require.Zero(t, config.UintSafe(c, incorrect))
-	})
-}
-
-func TestSizeInBytes(t *testing.T) {
-	const (
-		kb = 1024
-		mb = 1024 * kb
-		gb = 1024 * mb
-		tb = 1024 * gb
-	)
-	configtest.ForEachFileType("test/config", func(c *config.Config) {
-		c = c.Sub("sizes")
-		require.EqualValues(t, 1, config.SizeInBytesSafe(c, "size_b"))
-		require.EqualValues(t, kb, config.SizeInBytesSafe(c, "size_k"))
-		require.EqualValues(t, kb, config.SizeInBytesSafe(c, "size_kb"))
-		require.EqualValues(t, 2*kb, config.SizeInBytesSafe(c, "size_kb_no_space"))
-		require.EqualValues(t, 12*mb, config.SizeInBytesSafe(c, "size_m"))
-		require.EqualValues(t, 12*mb, config.SizeInBytesSafe(c, "size_mb"))
-		require.EqualValues(t, 4*gb, config.SizeInBytesSafe(c, "size_g"))
-		require.EqualValues(t, 4*gb, config.SizeInBytesSafe(c, "size_gb"))
-		require.EqualValues(t, 5*tb, config.SizeInBytesSafe(c, "size_t"))
-		require.EqualValues(t, 5*tb, config.SizeInBytesSafe(c, "size_tb"))
-		require.EqualValues(t, 12, config.SizeInBytesSafe(c, "size_i_am_not_very_clever"))
-		require.EqualValues(t, tb/2, config.SizeInBytesSafe(c, "size_float"))
-		require.EqualValues(t, uint64(14*gb+(gb*123/1000/mb*mb)), config.SizeInBytesSafe(c, "size_float_big"))
-		require.EqualValues(t, 2048, config.SizeInBytesSafe(c, "size_bytes"))
-		require.EqualValues(t, 123456, config.SizeInBytesSafe(c, "size_bytes_no_suffix"))
-	})
-}
diff --git a/cmd/frostfs-node/config/config.go b/cmd/frostfs-node/config/config.go
deleted file mode 100644
index d74e820ac..000000000
--- a/cmd/frostfs-node/config/config.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package config
-
-import (
-	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
-	"github.com/spf13/viper"
-)
-
-// Config represents a group of named values structured
-// as a tree.
-//
-// Sub-trees are named configuration sub-sections,
-// leaves are named configuration values.
-// Names are of string type.
-type Config struct {
-	v *viper.Viper
-
-	configFile string
-	configDir  string
-	envPrefix  string
-
-	defaultPath []string
-	path        []string
-}
-
-const (
-	// EnvPrefix is a prefix of ENV variables related
-	// to storage node configuration.
-	EnvPrefix = "FROSTFS"
-)
-
-// New creates a new Config instance.
-//
-// If the file option is provided,
-// configuration values are read from it.
-// Otherwise, Config is a degenerate tree.
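// Construction sketch mirroring TestConfigDir below: a Config may be built
// from a file, a directory of files, or left empty and driven by ENV only;
// `configDir` is an assumed path variable.
//
//	c := config.New("", configDir, config.EnvPrefix)
//	level := c.Sub("logger").Value("level")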
-func New(configFile, configDir, envPrefix string) *Config {
-	v, err := configViper.CreateViper(
-		configViper.WithConfigFile(configFile),
-		configViper.WithConfigDir(configDir),
-		configViper.WithEnvPrefix(envPrefix))
-	if err != nil {
-		panic(err)
-	}
-
-	return &Config{
-		v:          v,
-		configFile: configFile,
-		configDir:  configDir,
-		envPrefix:  envPrefix,
-	}
-}
-
-// Reload re-reads the configuration if a path was provided to New.
-func (x *Config) Reload() error {
-	return configViper.ReloadViper(
-		configViper.WithViper(x.v), configViper.WithConfigFile(x.configFile),
-		configViper.WithConfigDir(x.configDir),
-		configViper.WithEnvPrefix(x.envPrefix))
-}
diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go
deleted file mode 100644
index ee9d4268b..000000000
--- a/cmd/frostfs-node/config/configdir_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package config
-
-import (
-	"os"
-	"path"
-	"testing"
-
-	"github.com/spf13/cast"
-	"github.com/stretchr/testify/require"
-)
-
-func TestConfigDir(t *testing.T) {
-	dir := t.TempDir()
-
-	cfgFileName := path.Join(dir, "cfg_01.yml")
-
-	require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
-
-	c := New("", dir, "")
-	require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
-}
diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go
deleted file mode 100644
index 1cd64a6f8..000000000
--- a/cmd/frostfs-node/config/container/container.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
-	subsection           = "container"
-	listStreamSubsection = "list_stream"
-
-	// ContainerBatchSizeDefault represents the maximum number of containers to send via stream at once.
-	ContainerBatchSizeDefault = 1000
-)
-
-// ContainerBatchSize returns the value of "batch_size" config parameter
-// from "list_stream" subsection of "container" section.
-//
-// Returns ContainerBatchSizeDefault if the value is missing or if
-// the value is not a positive integer.
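// Configuration sketch for the parameter read below; the example config used
// by the tests sets 500:
//
//	container:
//	  list_stream:
//	    batch_size: 500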
-func ContainerBatchSize(c *config.Config) uint32 {
-	if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
-		return ContainerBatchSizeDefault
-	}
-	size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
-	if size == 0 {
-		return ContainerBatchSizeDefault
-	}
-	return size
-}
diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go
deleted file mode 100644
index 744cd3295..000000000
--- a/cmd/frostfs-node/config/container/container_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig_test
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestContainerSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-		require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
-	})
-
-	const path = "../../../../config/example/node"
-	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/contracts/config.go b/cmd/frostfs-node/config/contracts/config.go
deleted file mode 100644
index df0c0b958..000000000
--- a/cmd/frostfs-node/config/contracts/config.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package contractsconfig
-
-import (
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-const (
-	subsection = "contracts"
-)
-
-// Netmap returns the value of "netmap" config parameter
-// from "contracts" section.
-//
-// Returns a zero-filled script hash if the value is not set.
-// Panics if the value is not a 20-byte LE hex-encoded string.
-func Netmap(c *config.Config) util.Uint160 {
-	return contractAddress(c, "netmap")
-}
-
-// Balance returns the value of "balance" config parameter
-// from "contracts" section.
-//
-// Returns a zero-filled script hash if the value is not set.
-// Panics if the value is not a 20-byte LE hex-encoded string.
-func Balance(c *config.Config) util.Uint160 {
-	return contractAddress(c, "balance")
-}
-
-// Container returns the value of "container" config parameter
-// from "contracts" section.
-//
-// Returns a zero-filled script hash if the value is not set.
-// Panics if the value is not a 20-byte LE hex-encoded string.
-func Container(c *config.Config) util.Uint160 {
-	return contractAddress(c, "container")
-}
-
-func FrostfsID(c *config.Config) util.Uint160 {
-	return contractAddress(c, "frostfsid")
-}
-
-// Proxy returns the value of "proxy" config parameter
-// from "contracts" section.
-//
-// Returns a zero-filled script hash if the value is not set.
-// Panics if the value is not a 20-byte LE hex-encoded string.
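// Configuration sketch for the "contracts" section, reusing the proxy hash
// from the example config asserted in the tests below; an unset value yields
// util.Uint160{} so that the NNS resolver is used instead (see
// contractAddress below):
//
//	contracts:
//	  proxy: ad7c6b55b737b696e5c82c85445040964a03e97f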
-func Proxy(c *config.Config) util.Uint160 {
-	return contractAddress(c, "proxy")
-}
-
-func contractAddress(c *config.Config, name string) util.Uint160 {
-	v := config.String(c.Sub(subsection), name)
-	if v == "" {
-		return util.Uint160{} // if address is not set, then NNS resolver should be used
-	}
-
-	addr, err := util.Uint160DecodeStringLE(v)
-	if err != nil {
-		panic(fmt.Errorf(
-			"can't parse %s contract address %s: %w",
-			name,
-			v,
-			err,
-		))
-	}
-
-	return addr
-}
diff --git a/cmd/frostfs-node/config/contracts/config_test.go b/cmd/frostfs-node/config/contracts/config_test.go
deleted file mode 100644
index c85a625c5..000000000
--- a/cmd/frostfs-node/config/contracts/config_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package contractsconfig_test
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-)
-
-func TestContractsSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-		emptyHash := util.Uint160{}
-
-		require.Equal(t, emptyHash, contractsconfig.Balance(empty))
-		require.Equal(t, emptyHash, contractsconfig.Container(empty))
-		require.Equal(t, emptyHash, contractsconfig.Netmap(empty))
-		require.Equal(t, emptyHash, contractsconfig.Proxy(empty))
-	})
-
-	const path = "../../../../config/example/node"
-
-	expBalance, err := util.Uint160DecodeStringLE("5263abba1abedbf79bb57f3e40b50b4425d2d6cd")
-	require.NoError(t, err)
-
-	expContainer, err := util.Uint160DecodeStringLE("5d084790d7aa36cea7b53fe897380dab11d2cd3c")
-	require.NoError(t, err)
-
-	expNetmap, err := util.Uint160DecodeStringLE("0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca")
-	require.NoError(t, err)
-
-	expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f")
-	require.NoError(t, err)
-
-	fileConfigTest := func(c *config.Config) {
-		balance := contractsconfig.Balance(c)
-		container := contractsconfig.Container(c)
-		netmap := contractsconfig.Netmap(c)
-		proxy := contractsconfig.Proxy(c)
-
-		require.Equal(t, expBalance, balance)
-		require.Equal(t, expContainer, container)
-		require.Equal(t, expNetmap, netmap)
-		require.Equal(t, expProxy, proxy)
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/control/config.go b/cmd/frostfs-node/config/control/config.go
deleted file mode 100644
index 6bc06b76d..000000000
--- a/cmd/frostfs-node/config/control/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package controlconfig
-
-import (
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// GRPCConfig is a wrapper over "grpc" config section which provides access
-// to gRPC configuration of control service.
-type GRPCConfig struct {
-	cfg *config.Config
-}
-
-const (
-	subsection     = "control"
-	grpcSubsection = "grpc"
-
-	// GRPCEndpointDefault is a default endpoint of gRPC Control service.
-	GRPCEndpointDefault = ""
-)
-
-// AuthorizedKeys parses and returns an array of "authorized_keys" config
-// parameter from "control" section.
-//
-// Returns an empty list if not set.
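// Configuration sketch matching the expectations in config_test.go below
// (compressed 33-byte hex-encoded public keys):
//
//	control:
//	  authorized_keys:
//	    - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
//	    - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
//	  grpc:
//	    endpoint: localhost:8090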
-func AuthorizedKeys(c *config.Config) keys.PublicKeys {
-	strKeys := config.StringSliceSafe(c.Sub(subsection), "authorized_keys")
-	pubs := make(keys.PublicKeys, 0, len(strKeys))
-
-	for i := range strKeys {
-		pub, err := keys.NewPublicKeyFromString(strKeys[i])
-		if err != nil {
-			panic(fmt.Errorf("invalid permitted key for Control service %s: %w", strKeys[i], err))
-		}
-
-		pubs = append(pubs, pub)
-	}
-
-	return pubs
-}
-
-// GRPC returns a structure that provides access to "grpc" subsection of
-// "control" section.
-func GRPC(c *config.Config) GRPCConfig {
-	return GRPCConfig{
-		c.Sub(subsection).Sub(grpcSubsection),
-	}
-}
-
-// Endpoint returns the value of "endpoint" config parameter.
-//
-// Returns GRPCEndpointDefault if the value is not a non-empty string.
-func (g GRPCConfig) Endpoint() string {
-	v := config.String(g.cfg, "endpoint")
-	if v != "" {
-		return v
-	}
-
-	return GRPCEndpointDefault
-}
diff --git a/cmd/frostfs-node/config/control/config_test.go b/cmd/frostfs-node/config/control/config_test.go
deleted file mode 100644
index f702d83ae..000000000
--- a/cmd/frostfs-node/config/control/config_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package controlconfig_test
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/stretchr/testify/require"
-)
-
-func TestControlSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-
-		require.Empty(t, controlconfig.AuthorizedKeys(empty))
-		require.Equal(t, controlconfig.GRPCEndpointDefault, controlconfig.GRPC(empty).Endpoint())
-	})
-
-	const path = "../../../../config/example/node"
-
-	pubs := make(keys.PublicKeys, 2)
-	pubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
-	pubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
-
-	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, pubs, controlconfig.AuthorizedKeys(c))
-		require.Equal(t, "localhost:8090", controlconfig.GRPC(c).Endpoint())
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
deleted file mode 100644
index 7994e7809..000000000
--- a/cmd/frostfs-node/config/engine/config.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package engineconfig
-
-import (
-	"errors"
-	"strconv"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
-
-const (
-	subsection = "storage"
-)
-
-// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
-var ErrNoShardConfigured = errors.New("no shard configured")
-
-// IterateShards iterates over subsections of "shard" subsection of "storage" section of c,
-// wraps them into shardconfig.Config and passes them to f.
-//
-// Section names are expected to be consecutive integer numbers, starting from 0.
-//
-// Returns ErrNoShardConfigured if no shards are found while shards are required.
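// Layout sketch of the subtree IterateShards walks; the "default" subsection
// is attached to each shard via SetDefault, and iteration stops at the first
// index without a "metabase.path". Paths and the default key are illustrative:
//
//	storage:
//	  shard:
//	    default:
//	      resync_metabase: false
//	    0:
//	      metabase:
//	        path: /storage/meta0.db
//	    1:
//	      metabase:
//	        path: /storage/meta1.db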
-func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) error) error {
-	c = c.Sub(subsection)
-
-	c = c.Sub("shard")
-	def := c.Sub("default")
-
-	alive := 0
-	i := uint64(0)
-	for ; ; i++ {
-		si := strconv.FormatUint(i, 10)
-
-		sc := shardconfig.From(
-			c.Sub(si),
-		)
-
-		if sc.Mode() == mode.Disabled {
-			continue
-		}
-
-		// Path for the blobstor can't be present in the default section, because different shards
-		// must have different paths, so if it is missing, the shard is not here.
-		// At the same time, checking for the "blobstor" section doesn't work properly
-		// with configuration via the environment.
-		if (*config.Config)(sc).Value("metabase.path") == nil {
-			break
-		}
-		(*config.Config)(sc).SetDefault(def)
-
-		if err := f(sc); err != nil {
-			return err
-		}
-		alive++
-	}
-	if alive == 0 && required {
-		return ErrNoShardConfigured
-	}
-	return nil
-}
-
-// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
-//
-// Returns 0 if the value is missing.
-func ShardErrorThreshold(c *config.Config) uint32 {
-	return config.Uint32Safe(c.Sub(subsection), "shard_ro_error_threshold")
-}
-
-// EngineLowMemoryConsumption returns the value of the "low_mem" config parameter from "storage" section.
-func EngineLowMemoryConsumption(c *config.Config) bool {
-	return config.BoolSafe(c.Sub(subsection), "low_mem")
-}
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
deleted file mode 100644
index 401c54edc..000000000
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package engineconfig_test
-
-import (
-	"io/fs"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
-	shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
-	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
-	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
-	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
-	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
-	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"github.com/stretchr/testify/require"
-)
-
-func TestIterateShards(t *testing.T) {
-	fileConfigTest := func(c *config.Config) {
-		var res []string
-		require.NoError(t,
-			engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
-				res = append(res, sc.Metabase().Path())
-				return nil
-			}))
-		require.Equal(t, []string{"abc", "xyz"}, res)
-	}
-
-	const cfgDir = "./testdata/shards"
-	configtest.ForEachFileType(cfgDir, fileConfigTest)
-	configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
-}
-
-func TestEngineSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-
-		require.ErrorIs(t,
-			engineconfig.IterateShards(empty, true, nil),
-
engineconfig.ErrNoShardConfigured) - - handlerCalled := false - - require.NoError(t, - engineconfig.IterateShards(empty, false, func(_ *shardconfig.Config) error { - handlerCalled = true - return nil - })) - - require.False(t, handlerCalled) - - require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) - require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - num := 0 - - require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) - - err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { - defer func() { - num++ - }() - - wc := sc.WriteCache() - meta := sc.Metabase() - blob := sc.BlobStor() - ss := blob.Storages() - pl := sc.Pilorama() - gc := sc.GC() - limits := sc.Limits() - - switch num { - case 0: - require.Equal(t, "tmp/0/blob/pilorama.db", pl.Path()) - require.Equal(t, fs.FileMode(piloramaconfig.PermDefault), pl.Perm()) - require.False(t, pl.NoSync()) - require.Equal(t, pl.MaxBatchDelay(), 10*time.Millisecond) - require.Equal(t, pl.MaxBatchSize(), 200) - - require.Equal(t, false, wc.Enabled()) - require.Equal(t, true, wc.NoSync()) - - require.Equal(t, "tmp/0/cache", wc.Path()) - require.EqualValues(t, 134217728, wc.MaxObjectSize()) - require.EqualValues(t, 30, wc.WorkerCount()) - require.EqualValues(t, 3221225472, wc.SizeLimit()) - require.EqualValues(t, 49, wc.CountLimit()) - require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize()) - - require.Equal(t, "tmp/0/meta", meta.Path()) - require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm()) - require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) - require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - - require.Equal(t, true, sc.Compression().Enabled) - require.Equal(t, compression.LevelFastest, sc.Compression().Level) - require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) - require.Equal(t, true, sc.Compression().EstimateCompressibility) - require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) - require.EqualValues(t, 102400, sc.SmallSizeLimit()) - - require.Equal(t, 2, len(ss)) - blz := blobovniczaconfig.From((*config.Config)(ss[0])) - require.Equal(t, "tmp/0/blob/blobovnicza", ss[0].Path()) - require.EqualValues(t, 0o644, blz.BoltDB().Perm()) - require.EqualValues(t, 4194304, blz.Size()) - require.EqualValues(t, 1, blz.ShallowDepth()) - require.EqualValues(t, 4, blz.ShallowWidth()) - require.EqualValues(t, 50, blz.OpenedCacheSize()) - require.EqualValues(t, time.Minute, blz.OpenedCacheTTL()) - require.EqualValues(t, 30*time.Second, blz.OpenedCacheExpInterval()) - require.EqualValues(t, 10, blz.InitWorkerCount()) - require.EqualValues(t, 30*time.Second, blz.RebuildDropTimeout()) - - require.Equal(t, "tmp/0/blob", ss[1].Path()) - require.EqualValues(t, 0o644, ss[1].Perm()) - - fst := fstreeconfig.From((*config.Config)(ss[1])) - require.EqualValues(t, 5, fst.Depth()) - require.Equal(t, false, fst.NoSync()) - - require.EqualValues(t, 150, gc.RemoverBatchSize()) - require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval()) - require.Equal(t, 1500, gc.ExpiredCollectorBatchSize()) - require.Equal(t, 15, gc.ExpiredCollectorWorkerCount()) - - require.Equal(t, false, sc.RefillMetabase()) - require.Equal(t, mode.ReadOnly, sc.Mode()) - require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) - - readLimits := limits.ToConfig().Read - writeLimits := limits.ToConfig().Write - require.Equal(t, 
30*time.Second, readLimits.IdleTimeout) - require.Equal(t, int64(10_000), readLimits.MaxRunningOps) - require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) - require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) - require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) - require.Equal(t, int64(100), writeLimits.MaxWaitingOps) - require.ElementsMatch(t, readLimits.Tags, - []qos.IOTagConfig{ - { - Tag: "internal", - Weight: toPtr(20), - ReservedOps: toPtr(1000), - LimitOps: toPtr(0), - }, - { - Tag: "client", - Weight: toPtr(70), - ReservedOps: toPtr(10000), - }, - { - Tag: "background", - Weight: toPtr(5), - LimitOps: toPtr(10000), - ReservedOps: toPtr(0), - }, - { - Tag: "writecache", - Weight: toPtr(5), - LimitOps: toPtr(25000), - }, - { - Tag: "policer", - Weight: toPtr(5), - LimitOps: toPtr(25000), - Prohibited: true, - }, - { - Tag: "treesync", - Weight: toPtr(5), - LimitOps: toPtr(25), - }, - }) - require.ElementsMatch(t, writeLimits.Tags, - []qos.IOTagConfig{ - { - Tag: "internal", - Weight: toPtr(200), - ReservedOps: toPtr(100), - LimitOps: toPtr(0), - }, - { - Tag: "client", - Weight: toPtr(700), - ReservedOps: toPtr(1000), - }, - { - Tag: "background", - Weight: toPtr(50), - LimitOps: toPtr(1000), - ReservedOps: toPtr(0), - }, - { - Tag: "writecache", - Weight: toPtr(50), - LimitOps: toPtr(2500), - }, - { - Tag: "policer", - Weight: toPtr(50), - LimitOps: toPtr(2500), - }, - { - Tag: "treesync", - Weight: toPtr(50), - LimitOps: toPtr(100), - }, - }) - case 1: - require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) - require.Equal(t, fs.FileMode(0o644), pl.Perm()) - require.True(t, pl.NoSync()) - require.Equal(t, 5*time.Millisecond, pl.MaxBatchDelay()) - require.Equal(t, 100, pl.MaxBatchSize()) - - require.Equal(t, true, wc.Enabled()) - require.Equal(t, false, wc.NoSync()) - - require.Equal(t, "tmp/1/cache", wc.Path()) - require.EqualValues(t, 134217728, wc.MaxObjectSize()) - require.EqualValues(t, 30, wc.WorkerCount()) - require.EqualValues(t, 4294967296, wc.SizeLimit()) - require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit()) - require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize()) - - require.Equal(t, "tmp/1/meta", meta.Path()) - require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm()) - require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) - require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - - require.Equal(t, false, sc.Compression().Enabled) - require.Equal(t, compression.LevelDefault, sc.Compression().Level) - require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) - require.EqualValues(t, 102400, sc.SmallSizeLimit()) - - require.Equal(t, 2, len(ss)) - - blz := blobovniczaconfig.From((*config.Config)(ss[0])) - require.Equal(t, "tmp/1/blob/blobovnicza", ss[0].Path()) - require.EqualValues(t, 4194304, blz.Size()) - require.EqualValues(t, 1, blz.ShallowDepth()) - require.EqualValues(t, 4, blz.ShallowWidth()) - require.EqualValues(t, 50, blz.OpenedCacheSize()) - require.EqualValues(t, 5*time.Minute, blz.OpenedCacheTTL()) - require.EqualValues(t, 15*time.Second, blz.OpenedCacheExpInterval()) - require.EqualValues(t, blobovniczaconfig.InitWorkerCountDefault, blz.InitWorkerCount()) - require.EqualValues(t, blobovniczaconfig.RebuildDropTimeoutDefault, blz.RebuildDropTimeout()) - - require.Equal(t, "tmp/1/blob", ss[1].Path()) - require.EqualValues(t, 0o644, ss[1].Perm()) - - fst := fstreeconfig.From((*config.Config)(ss[1])) - require.EqualValues(t, 5, fst.Depth()) - 
require.Equal(t, true, fst.NoSync())
-
-			require.EqualValues(t, 200, gc.RemoverBatchSize())
-			require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
-			require.Equal(t, gcconfig.ExpiredCollectorBatchSizeDefault, gc.ExpiredCollectorBatchSize())
-			require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkerCount())
-
-			require.Equal(t, true, sc.RefillMetabase())
-			require.Equal(t, mode.ReadWrite, sc.Mode())
-			require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
-
-			readLimits := limits.ToConfig().Read
-			writeLimits := limits.ToConfig().Write
-			require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
-			require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
-			require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
-			require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
-			require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
-			require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
-			require.Equal(t, 0, len(readLimits.Tags))
-			require.Equal(t, 0, len(writeLimits.Tags))
-		}
-		return nil
-	})
-	require.NoError(t, err)
-	require.Equal(t, 2, num)
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
-
-func toPtr(v float64) *float64 {
-	return &v
-}
diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go
deleted file mode 100644
index ac69c4c4f..000000000
--- a/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package blobovniczaconfig
-
-import (
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
-)
-
-// Config is a wrapper over the config section
-// which provides access to Blobovnicza configurations.
-type Config config.Config
-
-const (
-	// SizeDefault is a default limit for the estimated size of a Blobovnicza.
-	SizeDefault = 1 << 30
-
-	// ShallowDepthDefault is a default shallow dir depth.
-	ShallowDepthDefault = 2
-
-	// ShallowWidthDefault is a default shallow dir width.
-	ShallowWidthDefault = 16
-
-	// OpenedCacheSizeDefault is a default cache size of opened Blobovniczas.
-	OpenedCacheSizeDefault = 16
-
-	// OpenedCacheTTLDefault is a default cache TTL of opened Blobovniczas.
-	OpenedCacheTTLDefault = 0 // means expiring is off
-
-	// OpenedCacheExpIntervalDefault is a default cache cleanup interval for expired Blobovniczas.
-	OpenedCacheExpIntervalDefault = 15 * time.Second
-
-	// InitWorkerCountDefault is a default worker count for initializing Blobovniczas.
-	InitWorkerCountDefault = 5
-
-	// RebuildDropTimeoutDefault is a default timeout to wait before dropping a single blobovnicza.
-	RebuildDropTimeoutDefault = 10 * time.Second
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Type returns the storage type.
-func (x *Config) Type() string {
-	return blobovniczatree.Type
-}
-
-// Size returns the value of "size" config parameter.
-//
-// Returns SizeDefault if the value is not a positive number.
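// Configuration sketch for a blobovnicza storage entry; values mirror the
// example config asserted in the engine tests above, and the nesting inside
// "blobstor" is abbreviated:
//
//	blobovnicza:
//	  size: 4m                  # SizeInBytesSafe: 4194304 bytes
//	  depth: 1
//	  width: 4
//	  opened_cache_capacity: 50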
-func (x *Config) Size() uint64 { - s := config.SizeInBytesSafe( - (*config.Config)(x), - "size", - ) - - if s > 0 { - return s - } - - return SizeDefault -} - -// ShallowDepth returns the value of "depth" config parameter. -// -// Returns ShallowDepthDefault if the value is not a positive number. -func (x *Config) ShallowDepth() uint64 { - d := config.UintSafe( - (*config.Config)(x), - "depth", - ) - - if d > 0 { - return d - } - - return ShallowDepthDefault -} - -// ShallowWidth returns the value of "width" config parameter. -// -// Returns ShallowWidthDefault if the value is not a positive number. -func (x *Config) ShallowWidth() uint64 { - d := config.UintSafe( - (*config.Config)(x), - "width", - ) - - if d > 0 { - return d - } - - return ShallowWidthDefault -} - -// OpenedCacheSize returns the value of "opened_cache_capacity" config parameter. -// -// Returns OpenedCacheSizeDefault if the value is not a positive number. -func (x *Config) OpenedCacheSize() int { - d := config.IntSafe( - (*config.Config)(x), - "opened_cache_capacity", - ) - - if d > 0 { - return int(d) - } - - return OpenedCacheSizeDefault -} - -// OpenedCacheTTL returns the value of "opened_cache_ttl" config parameter. -// -// Returns OpenedCacheTTLDefault if the value is not a positive number. -func (x *Config) OpenedCacheTTL() time.Duration { - d := config.DurationSafe( - (*config.Config)(x), - "opened_cache_ttl", - ) - - if d > 0 { - return d - } - - return OpenedCacheTTLDefault -} - -// OpenedCacheExpInterval returns the value of "opened_cache_exp_interval" config parameter. -// -// Returns OpenedCacheExpIntervalDefault if the value is not a positive number. -func (x *Config) OpenedCacheExpInterval() time.Duration { - d := config.DurationSafe( - (*config.Config)(x), - "opened_cache_exp_interval", - ) - - if d > 0 { - return d - } - - return OpenedCacheExpIntervalDefault -} - -// BoltDB returns config instance for querying bolt db specific parameters. -func (x *Config) BoltDB() *boltdbconfig.Config { - return (*boltdbconfig.Config)(x) -} - -// InitWorkerCount returns the value of "init_worker_count" config parameter. -// -// Returns InitWorkerCountDefault if the value is not a positive number. -func (x *Config) InitWorkerCount() int { - d := config.IntSafe( - (*config.Config)(x), - "init_worker_count", - ) - - if d > 0 { - return int(d) - } - - return InitWorkerCountDefault -} - -// RebuildDropTimeout returns the value of "rebuild_drop_timeout" config parameter. -// -// Returns RebuildDropTimeoutDefault if the value is not defined or invalid. -func (x *Config) RebuildDropTimeout() time.Duration { - d := config.DurationSafe( - (*config.Config)(x), - "rebuild_drop_timeout", - ) - if d > 0 { - return d - } - return RebuildDropTimeoutDefault -} diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/config.go deleted file mode 100644 index f8b2e2e9b..000000000 --- a/cmd/frostfs-node/config/engine/shard/blobstor/config.go +++ /dev/null @@ -1,33 +0,0 @@ -package blobstorconfig - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/storage" -) - -// Config is a wrapper over the config section -// which provides access to BlobStor configurations. -type Config config.Config - -// From wraps config section into Config. 
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Storages returns the value of storage subcomponents.
-func (x *Config) Storages() []*storage.Config {
-	var ss []*storage.Config
-	for i := 0; ; i++ {
-		typ := config.String(
-			(*config.Config)(x),
-			strconv.Itoa(i)+".type")
-		if typ == "" {
-			return ss
-		}
-
-		sub := storage.From((*config.Config)(x).Sub(strconv.Itoa(i)))
-		ss = append(ss, sub)
-	}
-}
diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/fstree/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/fstree/config.go
deleted file mode 100644
index de9f6ba5b..000000000
--- a/cmd/frostfs-node/config/engine/shard/blobstor/fstree/config.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package fstree
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-)
-
-// Config is a wrapper over the config section
-// which provides access to FSTree configurations.
-type Config config.Config
-
-// DepthDefault is a default shallow dir depth.
-const DepthDefault = 4
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Type returns the storage type.
-func (x *Config) Type() string {
-	return fstree.Type
-}
-
-// Depth returns the value of "depth" config parameter.
-//
-// Returns DepthDefault if the value is out of
-// [1:fstree.MaxDepth] range.
-func (x *Config) Depth() uint64 {
-	d := config.UintSafe(
-		(*config.Config)(x),
-		"depth",
-	)
-
-	if d >= 1 && d <= fstree.MaxDepth {
-		return d
-	}
-
-	return DepthDefault
-}
-
-// NoSync returns the value of "no_sync" config parameter.
-//
-// Returns false if the value is not a boolean or is missing.
-func (x *Config) NoSync() bool {
-	return config.BoolSafe((*config.Config)(x), "no_sync")
-}
diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go
deleted file mode 100644
index e83c69de2..000000000
--- a/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package storage
-
-import (
-	"io/fs"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-type Config config.Config
-
-// PermDefault is the default permission bits for BlobStor data.
-const PermDefault = 0o660
-
-func From(x *config.Config) *Config {
-	return (*Config)(x)
-}
-
-// Type returns storage type.
-func (x *Config) Type() string {
-	return config.String(
-		(*config.Config)(x),
-		"type")
-}
-
-// Path returns the value of "path" config parameter.
-//
-// Panics if the value is not a non-empty string.
-func (x *Config) Path() string {
-	p := config.String(
-		(*config.Config)(x),
-		"path",
-	)
-
-	if p == "" {
-		panic("blobstor path not set")
-	}
-
-	return p
-}
-
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
-//
-// Returns PermDefault if the value is not a non-zero number.
-func (x *Config) Perm() fs.FileMode {
-	p := config.UintSafe(
-		(*config.Config)(x),
-		"perm",
-	)
-
-	if p == 0 {
-		p = PermDefault
-	}
-
-	return fs.FileMode(p)
-}
diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
deleted file mode 100644
index b564d36f8..000000000
--- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package boltdbconfig
-
-import (
-	"io/fs"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-// Config is a wrapper over the config section
-// which provides access to boltdb-specific parameters.
-type Config config.Config
-
-const (
-	// PermDefault is the default permission bits for the metabase file.
-	PermDefault = 0o660
-)
-
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
-//
-// Returns PermDefault if the value is not a positive number.
-func (x *Config) Perm() fs.FileMode {
-	p := config.UintSafe(
-		(*config.Config)(x),
-		"perm",
-	)
-
-	if p == 0 {
-		p = PermDefault
-	}
-
-	return fs.FileMode(p)
-}
-
-// MaxBatchDelay returns the value of "max_batch_delay" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) MaxBatchDelay() time.Duration {
-	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	return max(d, 0)
-}
-
-// MaxBatchSize returns the value of "max_batch_size" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) MaxBatchSize() int {
-	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	return max(s, 0)
-}
-
-// NoSync returns the value of "no_sync" config parameter.
-//
-// Returns false if the value is not a boolean.
-func (x *Config) NoSync() bool {
-	return config.BoolSafe((*config.Config)(x), "no_sync")
-}
-
-// PageSize returns the value of "page_size" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) PageSize() int {
-	s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
-	return max(s, 0)
-}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
deleted file mode 100644
index d42646da7..000000000
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package shardconfig
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
-	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
-	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
-	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
-	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
-	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
-
-// Config is a wrapper over the config section
-// which provides access to Shard configurations.
-type Config config.Config
-
-const (
-	// SmallSizeLimitDefault is a default limit of small object payload size in bytes.
-	SmallSizeLimitDefault                   = 1 << 20
-	EstimateCompressibilityThresholdDefault = 0.1
-	RefillMetabaseWorkersCountDefault       = 500
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-func (x *Config) Compression() compression.Config {
-	cc := (*config.Config)(x).Sub("compression")
-	if cc == nil {
-		return compression.Config{}
-	}
-	return compression.Config{
-		Enabled:                          config.BoolSafe(cc, "enabled"),
-		UncompressableContentTypes:       config.StringSliceSafe(cc, "exclude_content_types"),
-		Level:                            compression.Level(config.StringSafe(cc, "level")),
-		EstimateCompressibility:          config.BoolSafe(cc, "estimate_compressibility"),
-		EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc),
-	}
-}
-
-// estimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
-//
-// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in the range [0.0; 1.0].
-func estimateCompressibilityThreshold(c *config.Config) float64 {
-	v := config.FloatOrDefault(
-		c,
-		"estimate_compressibility_threshold",
-		EstimateCompressibilityThresholdDefault)
-	if v < 0.0 || v > 1.0 {
-		return EstimateCompressibilityThresholdDefault
-	}
-	return v
-}
-
-// SmallSizeLimit returns the value of "small_object_size" config parameter.
-//
-// Returns SmallSizeLimitDefault if the value is not a positive number.
-func (x *Config) SmallSizeLimit() uint64 {
-	l := config.SizeInBytesSafe(
-		(*config.Config)(x),
-		"small_object_size",
-	)
-
-	if l > 0 {
-		return l
-	}
-
-	return SmallSizeLimitDefault
-}
-
-// BlobStor returns "blobstor" subsection as a blobstorconfig.Config.
-func (x *Config) BlobStor() *blobstorconfig.Config {
-	return blobstorconfig.From(
-		(*config.Config)(x).
-			Sub("blobstor"),
-	)
-}
-
-// Metabase returns "metabase" subsection as a metabaseconfig.Config.
-func (x *Config) Metabase() *metabaseconfig.Config {
-	return metabaseconfig.From(
-		(*config.Config)(x).
-			Sub("metabase"),
-	)
-}
-
-// WriteCache returns "writecache" subsection as a writecacheconfig.Config.
-func (x *Config) WriteCache() *writecacheconfig.Config {
-	return writecacheconfig.From(
-		(*config.Config)(x).
-			Sub("writecache"),
-	)
-}
-
-// Pilorama returns "pilorama" subsection as a piloramaconfig.Config.
-func (x *Config) Pilorama() *piloramaconfig.Config {
-	return piloramaconfig.From(
-		(*config.Config)(x).
-			Sub("pilorama"),
-	)
-}
-
-// GC returns "gc" subsection as a gcconfig.Config.
-func (x *Config) GC() *gcconfig.Config {
-	return gcconfig.From(
-		(*config.Config)(x).
-			Sub("gc"),
-	)
-}
-
-// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config {
-	return limitsconfig.From(
-		(*config.Config)(x).
-			Sub("limits"),
-	)
-}
-
-// RefillMetabase returns the value of "resync_metabase" config parameter.
-//
-// Returns false if the value is not a valid bool.
-func (x *Config) RefillMetabase() bool {
-	return config.BoolSafe(
-		(*config.Config)(x),
-		"resync_metabase",
-	)
-}
-
-// RefillMetabaseWorkersCount returns the value of "resync_metabase_worker_count" config parameter.
-//
-// Returns RefillMetabaseWorkersCountDefault if the value is not a positive number.
-func (x *Config) RefillMetabaseWorkersCount() int {
-	v := config.IntSafe(
-		(*config.Config)(x),
-		"resync_metabase_worker_count",
-	)
-	if v > 0 {
-		return int(v)
-	}
-	return RefillMetabaseWorkersCountDefault
-}
-
-// Mode returns the value of "mode" config parameter.
-//
-// Panics if the value read is not one of the
-// predefined shard modes.
-func (x *Config) Mode() (m mode.Mode) {
-	s := config.StringSafe(
-		(*config.Config)(x),
-		"mode",
-	)
-
-	switch s {
-	case "read-write", "":
-		m = mode.ReadWrite
-	case "read-only":
-		m = mode.ReadOnly
-	case "degraded":
-		m = mode.Degraded
-	case "degraded-read-only":
-		m = mode.DegradedReadOnly
-	case "disabled":
-		m = mode.Disabled
-	default:
-		panic("unknown shard mode: " + s)
-	}
-
-	return
-}
diff --git a/cmd/frostfs-node/config/engine/shard/gc/config.go b/cmd/frostfs-node/config/engine/shard/gc/config.go
deleted file mode 100644
index 8cb90d3ff..000000000
--- a/cmd/frostfs-node/config/engine/shard/gc/config.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package gcconfig
-
-import (
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-// Config is a wrapper over the config section
-// which provides access to Shard's GC configurations.
-type Config config.Config
-
-const (
-	// RemoverBatchSizeDefault is a default batch size for Shard GC's remover.
-	RemoverBatchSizeDefault = 100
-
-	// RemoverSleepIntervalDefault is a default sleep interval of Shard GC's remover.
-	RemoverSleepIntervalDefault = time.Minute
-
-	// ExpiredCollectorWorkersCountDefault is a default workers count of Shard GC expired object collector.
-	ExpiredCollectorWorkersCountDefault = 5
-
-	// ExpiredCollectorBatchSizeDefault is a default batch size of Shard GC expired object collector.
-	ExpiredCollectorBatchSizeDefault = 500
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// RemoverBatchSize returns the value of "remover_batch_size"
-// config parameter.
-//
-// Returns RemoverBatchSizeDefault if the value is not a positive number.
-func (x *Config) RemoverBatchSize() int {
-	s := config.IntSafe(
-		(*config.Config)(x),
-		"remover_batch_size",
-	)
-
-	if s > 0 {
-		return int(s)
-	}
-
-	return RemoverBatchSizeDefault
-}
-
-// RemoverSleepInterval returns the value of "remover_sleep_interval"
-// config parameter.
-//
-// Returns RemoverSleepIntervalDefault if the value is not a positive number.
-func (x *Config) RemoverSleepInterval() time.Duration {
-	s := config.DurationSafe(
-		(*config.Config)(x),
-		"remover_sleep_interval",
-	)
-
-	if s > 0 {
-		return s
-	}
-
-	return RemoverSleepIntervalDefault
-}
-
-// ExpiredCollectorWorkerCount returns the value of "expired_collector_worker_count"
-// config parameter.
-//
-// Returns ExpiredCollectorWorkersCountDefault if the value is not a positive number.
-func (x *Config) ExpiredCollectorWorkerCount() int {
-	s := config.IntSafe(
-		(*config.Config)(x),
-		"expired_collector_worker_count",
-	)
-
-	if s > 0 {
-		return int(s)
-	}
-
-	return ExpiredCollectorWorkersCountDefault
-}
-
-// ExpiredCollectorBatchSize returns the value of "expired_collector_batch_size"
-// config parameter.
-//
-// Returns ExpiredCollectorBatchSizeDefault if the value is not a positive number.
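The shard Mode accessor above reduces to a fixed string-to-enum parse that treats the empty string as read-write and panics on anything unrecognized. A stdlib-only sketch of the same shape (the Mode type here is a local stand-in for the repo's mode.Mode):

```go
package main

import "fmt"

// Mode is a stand-in for the shard mode enum from
// pkg/local_object_storage/shard/mode.
type Mode int

const (
	ReadWrite Mode = iota
	ReadOnly
	Degraded
	DegradedReadOnly
	Disabled
)

// parseMode mirrors Config.Mode: the empty string means the default
// read-write mode, and any unknown value is a configuration error.
func parseMode(s string) Mode {
	switch s {
	case "read-write", "":
		return ReadWrite
	case "read-only":
		return ReadOnly
	case "degraded":
		return Degraded
	case "degraded-read-only":
		return DegradedReadOnly
	case "disabled":
		return Disabled
	default:
		panic("unknown shard mode: " + s)
	}
}

func main() {
	fmt.Println(parseMode(""))          // 0 (read-write)
	fmt.Println(parseMode("read-only")) // 1
}
```

Panicking here is deliberate: an unknown mode is a deployment error, and refusing to start is safer than guessing.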
-func (x *Config) ExpiredCollectorBatchSize() int { - s := config.IntSafe( - (*config.Config)(x), - "expired_collector_batch_size", - ) - - if s > 0 { - return int(s) - } - - return ExpiredCollectorBatchSizeDefault -} diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go deleted file mode 100644 index ccd1e0000..000000000 --- a/cmd/frostfs-node/config/engine/shard/limits/config.go +++ /dev/null @@ -1,112 +0,0 @@ -package limits - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "github.com/spf13/cast" -) - -// From wraps config section into Config. -func From(c *config.Config) *Config { - return (*Config)(c) -} - -// Config is a wrapper over the config section -// which provides access to Shard's limits configurations. -type Config config.Config - -func (x *Config) ToConfig() qos.LimiterConfig { - result := qos.LimiterConfig{ - Read: x.read(), - Write: x.write(), - } - panicOnErr(result.Validate()) - return result -} - -func (x *Config) read() qos.OpConfig { - return x.parse("read") -} - -func (x *Config) write() qos.OpConfig { - return x.parse("write") -} - -func (x *Config) parse(sub string) qos.OpConfig { - c := (*config.Config)(x).Sub(sub) - var result qos.OpConfig - - if s := config.Int(c, "max_waiting_ops"); s > 0 { - result.MaxWaitingOps = s - } else { - result.MaxWaitingOps = qos.NoLimit - } - - if s := config.Int(c, "max_running_ops"); s > 0 { - result.MaxRunningOps = s - } else { - result.MaxRunningOps = qos.NoLimit - } - - if s := config.DurationSafe(c, "idle_timeout"); s > 0 { - result.IdleTimeout = s - } else { - result.IdleTimeout = qos.DefaultIdleTimeout - } - - result.Tags = tags(c) - - return result -} - -func tags(c *config.Config) []qos.IOTagConfig { - c = c.Sub("tags") - var result []qos.IOTagConfig - for i := 0; ; i++ { - tag := config.String(c, strconv.Itoa(i)+".tag") - if tag == "" { - return result - } - - var tagConfig qos.IOTagConfig - tagConfig.Tag = tag - - v := c.Value(strconv.Itoa(i) + ".weight") - if v != nil { - w, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.Weight = &w - } - - v = c.Value(strconv.Itoa(i) + ".limit_ops") - if v != nil { - l, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.LimitOps = &l - } - - v = c.Value(strconv.Itoa(i) + ".reserved_ops") - if v != nil { - r, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.ReservedOps = &r - } - - v = c.Value(strconv.Itoa(i) + ".prohibited") - if v != nil { - r, err := cast.ToBoolE(v) - panicOnErr(err) - tagConfig.Prohibited = r - } - - result = append(result, tagConfig) - } -} - -func panicOnErr(err error) { - if err != nil { - panic(err) - } -} diff --git a/cmd/frostfs-node/config/engine/shard/metabase/config.go b/cmd/frostfs-node/config/engine/shard/metabase/config.go deleted file mode 100644 index 3730094d9..000000000 --- a/cmd/frostfs-node/config/engine/shard/metabase/config.go +++ /dev/null @@ -1,36 +0,0 @@ -package metabaseconfig - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb" -) - -// Config is a wrapper over the config section -// which provides access to Metabase configurations. -type Config config.Config - -// From wraps config section into Config. 
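The tags function above shows the indexed-subsection walk these packages rely on throughout: probe "0.", "1.", ... and stop at the first index whose required field is empty. The same loop shape drives the gRPC endpoint iterator and the morph RPC endpoint list further down. A stdlib sketch over a flat key map (the ioTag struct and sample tag names are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

type ioTag struct {
	Tag    string
	Weight float64
}

// collectTags walks keys "0.tag", "1.tag", ... and stops at the first
// index with no tag, the same termination rule used by tags().
func collectTags(kv map[string]string) []ioTag {
	var result []ioTag
	for i := 0; ; i++ {
		prefix := strconv.Itoa(i) + "."
		tag := kv[prefix+"tag"]
		if tag == "" {
			return result
		}
		w, _ := strconv.ParseFloat(kv[prefix+"weight"], 64) // 0 when absent
		result = append(result, ioTag{Tag: tag, Weight: w})
	}
}

func main() {
	kv := map[string]string{
		"0.tag": "internal", "0.weight": "20",
		"1.tag": "client", "1.weight": "70",
	}
	fmt.Println(collectTags(kv)) // [{internal 20} {client 70}]
}
```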
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Path returns the value of "path" config parameter.
-//
-// Panics if the value is not a non-empty string.
-func (x *Config) Path() string {
-	p := config.String(
-		(*config.Config)(x),
-		"path",
-	)
-
-	if p == "" {
-		panic("metabase path not set")
-	}
-
-	return p
-}
-
-// BoltDB returns config instance for querying bolt db specific parameters.
-func (x *Config) BoltDB() *boltdbconfig.Config {
-	return (*boltdbconfig.Config)(x)
-}
diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
deleted file mode 100644
index 5d4e8f408..000000000
--- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package piloramaconfig
-
-import (
-	"io/fs"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-// Config is a wrapper over the config section
-// which provides access to Pilorama configurations.
-type Config config.Config
-
-const (
-	// PermDefault is a default permission bits for pilorama file.
-	PermDefault = 0o660
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Path returns the value of "path" config parameter.
-//
-// Returns empty string if missing, for compatibility with older configurations.
-func (x *Config) Path() string {
-	return config.String((*config.Config)(x), "path")
-}
-
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
-//
-// Returns PermDefault if the value is not a positive number.
-func (x *Config) Perm() fs.FileMode {
-	p := config.UintSafe((*config.Config)(x), "perm")
-	if p == 0 {
-		p = PermDefault
-	}
-
-	return fs.FileMode(p)
-}
-
-// NoSync returns the value of "no_sync" config parameter as a bool value.
-//
-// Returns false if the value is not a boolean.
-func (x *Config) NoSync() bool {
-	return config.BoolSafe((*config.Config)(x), "no_sync")
}
-
-// MaxBatchDelay returns the value of "max_batch_delay" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) MaxBatchDelay() time.Duration {
-	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	return max(d, 0)
-}
-
-// MaxBatchSize returns the value of "max_batch_size" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) MaxBatchSize() int {
-	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	return max(s, 0)
-}
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
deleted file mode 100644
index 6fff0308b..000000000
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package writecacheconfig
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-// Config is a wrapper over the config section
-// which provides access to WriteCache configurations.
-type Config config.Config
-
-const (
-	// MaxSizeDefault is a default value of the object payload size limit.
-	MaxSizeDefault = 64 << 20
-
-	// WorkersNumberDefault is a default number of workers.
-	WorkersNumberDefault = 20
-
-	// SizeLimitDefault is a default write-cache size limit.
-	SizeLimitDefault = 1 << 30
-
-	// CountLimitDefault is a default write-cache count limit.
-	CountLimitDefault = 0
-
-	MaxFlushingObjectsSizeDefault = 128 << 20
-)
-
-// From wraps config section into Config.
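That From helper, repeated nearly verbatim across these packages, works because converting between a named type and its underlying type costs nothing at runtime; each package gets a typed, read-only accessor API over the shared config node. A self-contained sketch with a toy section type (section and its String method are assumptions of this example):

```go
package main

import "fmt"

// section is a stand-in for the node's *config.Config tree.
type section struct{ values map[string]string }

func (s *section) String(key string) string { return s.values[key] }

// Config wraps a section to give it a package-specific API.
// The conversion is free: both types share the same memory layout.
type Config section

func From(s *section) *Config { return (*Config)(s) }

// Path exposes one typed accessor instead of the raw key/value API.
func (x *Config) Path() string { return (*section)(x).String("path") }

func main() {
	s := &section{values: map[string]string{"path": "/srv/meta"}}
	fmt.Println(From(s).Path()) // /srv/meta
}
```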
-func From(c *config.Config) *Config { - return (*Config)(c) -} - -// Enabled returns true if write-cache is enabled and false otherwise. -// -// Panics if the value is not a boolean. -func (x *Config) Enabled() bool { - return config.Bool((*config.Config)(x), "enabled") -} - -// Path returns the value of "path" config parameter. -// -// Panics if the value is not a non-empty string. -func (x *Config) Path() string { - p := config.String( - (*config.Config)(x), - "path", - ) - - if p == "" { - panic("write cache path not set") - } - - return p -} - -// MaxObjectSize returns the value of "max_object_size" config parameter. -// -// Returns MaxSizeDefault if the value is not a positive number. -func (x *Config) MaxObjectSize() uint64 { - s := config.SizeInBytesSafe( - (*config.Config)(x), - "max_object_size", - ) - - if s > 0 { - return s - } - - return MaxSizeDefault -} - -// WorkerCount returns the value of "flush_worker_count" config parameter. -// -// Returns WorkersNumberDefault if the value is not a positive number. -func (x *Config) WorkerCount() int { - c := config.IntSafe( - (*config.Config)(x), - "flush_worker_count", - ) - - if c > 0 { - return int(c) - } - - return WorkersNumberDefault -} - -// SizeLimit returns the value of "capacity" config parameter. -// -// Returns SizeLimitDefault if the value is not a positive number. -func (x *Config) SizeLimit() uint64 { - c := config.SizeInBytesSafe( - (*config.Config)(x), - "capacity", - ) - - if c > 0 { - return c - } - - return SizeLimitDefault -} - -// CountLimit returns the value of "max_object_count" config parameter. -// -// Returns CountLimitDefault if the value is not a positive number. -func (x *Config) CountLimit() uint64 { - c := config.SizeInBytesSafe( - (*config.Config)(x), - "max_object_count", - ) - - if c > 0 { - return c - } - - return CountLimitDefault -} - -// NoSync returns the value of "no_sync" config parameter. -// -// Returns false if the value is not a boolean. -func (x *Config) NoSync() bool { - return config.BoolSafe((*config.Config)(x), "no_sync") -} - -// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter. -// -// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number. 
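Nearly every accessor in this write-cache section follows the same rule: take the configured value when it is positive, otherwise fall back to the package default. With generics the rule collapses into one helper; a sketch (orDefault is an illustrative name, not a helper from the repo):

```go
package main

import (
	"fmt"
	"time"
)

// orDefault returns v when it is positive and def otherwise, the rule
// repeated by MaxObjectSize, WorkerCount, SizeLimit and friends.
func orDefault[T int | int64 | uint64 | time.Duration](v, def T) T {
	if v > 0 {
		return v
	}
	return def
}

func main() {
	fmt.Println(orDefault(0, uint64(64<<20)))          // 67108864 (default)
	fmt.Println(orDefault(10, 20))                     // 10
	fmt.Println(orDefault(0*time.Second, time.Minute)) // 1m0s
}
```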
-func (x *Config) MaxFlushingObjectsSize() uint64 { - s := config.SizeInBytesSafe( - (*config.Config)(x), - "max_flushing_objects_size", - ) - - if s > 0 { - return s - } - - return MaxFlushingObjectsSizeDefault -} diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env deleted file mode 100644 index 079789b0f..000000000 --- a/cmd/frostfs-node/config/engine/testdata/shards.env +++ /dev/null @@ -1,3 +0,0 @@ -FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc -FROSTFS_STORAGE_SHARD_1_MODE=disabled -FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json deleted file mode 100644 index b3d6abe85..000000000 --- a/cmd/frostfs-node/config/engine/testdata/shards.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "storage.shard": { - "0": { - "metabase.path": "abc" - }, - "1": { - "mode": "disabled" - }, - "2": { - "metabase.path": "xyz" - } - } -} diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml deleted file mode 100644 index bbbba3af8..000000000 --- a/cmd/frostfs-node/config/engine/testdata/shards.yaml +++ /dev/null @@ -1,7 +0,0 @@ -storage.shard: - 0: - metabase.path: abc - 1: - mode: disabled - 2: - metabase.path: xyz diff --git a/cmd/frostfs-node/config/grpc/config.go b/cmd/frostfs-node/config/grpc/config.go deleted file mode 100644 index 37dd76426..000000000 --- a/cmd/frostfs-node/config/grpc/config.go +++ /dev/null @@ -1,126 +0,0 @@ -package grpcconfig - -import ( - "errors" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -var ( - errEndpointNotSet = errors.New("empty/not set endpoint, see `grpc.endpoint` section") - errTLSKeyNotSet = errors.New("empty/not set TLS key file path, see `grpc.tls.key` section") - errTLSCertNotSet = errors.New("empty/not set TLS certificate file path, see `grpc.tls.certificate` section") -) - -// Config is a wrapper over the config section -// which provides access to gRPC server configurations. -type Config config.Config - -// Endpoint returns the value of "endpoint" config parameter. -// -// Panics if the value is not a non-empty string. -func (x *Config) Endpoint() string { - v := config.StringSafe( - (*config.Config)(x), - "endpoint") - if v == "" { - panic(errEndpointNotSet) - } - - return v -} - -// TLS returns "tls" subsection as a TLSConfig. -// -// Returns nil if "enabled" value of "tls" subsection is false. -func (x *Config) TLS() *TLSConfig { - sub := (*config.Config)(x). - Sub("tls") - - if !config.BoolSafe(sub, "enabled") { - return nil - } - - return &TLSConfig{ - cfg: sub, - } -} - -// TLSConfig is a wrapper over the config section -// which provides access to TLS configurations -// of the gRPC server. -type TLSConfig struct { - cfg *config.Config -} - -// KeyFile returns the value of "key" config parameter. -// -// Panics if the value is not a non-empty string. -func (tls TLSConfig) KeyFile() string { - v := config.StringSafe(tls.cfg, "key") - if v == "" { - panic(errTLSKeyNotSet) - } - - return v -} - -// CertificateFile returns the value of "certificate" config parameter. -// -// Panics if the value is not a non-empty string. -func (tls TLSConfig) CertificateFile() string { - v := config.StringSafe(tls.cfg, "certificate") - if v == "" { - panic(errTLSCertNotSet) - } - - return v -} - -// UseInsecureCrypto returns true if TLS 1.2 cipher suite should not be restricted. 
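The TLSConfig wrapper below only hands back file paths; building an actual server credential from them is the caller's job. A hedged sketch of that step with the standard crypto/tls package (the paths are the placeholders used in the example config):

```go
package main

import (
	"crypto/tls"
	"log"
)

// buildTLS loads the key pair named by the "certificate" and "key"
// parameters, roughly what a caller of TLSConfig would do.
func buildTLS(certFile, keyFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		// Tighten the floor unless use_insecure_crypto is set.
		MinVersion: tls.VersionTLS12,
	}, nil
}

func main() {
	cfg, err := buildTLS("/path/to/cert", "/path/to/key")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("TLS configured, min version:", cfg.MinVersion)
}
```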
-func (tls TLSConfig) UseInsecureCrypto() bool {
-	return config.BoolSafe(tls.cfg, "use_insecure_crypto")
-}
-
-// IterateEndpoints iterates over subsections of "grpc" section of c,
-// wraps them into Config, and passes them to f.
-//
-// Section names are expected to be consecutive integer numbers, starting from 0.
-//
-// Panics if no endpoints are configured.
-func IterateEndpoints(c *config.Config, f func(*Config)) {
-	c = c.Sub("grpc")
-
-	i := uint64(0)
-	for ; ; i++ {
-		si := strconv.FormatUint(i, 10)
-
-		sc := (*Config)(c.Sub(si))
-
-		e := config.StringSafe((*config.Config)(sc), "endpoint")
-		if e == "" {
-			break
-		}
-
-		f(sc)
-	}
-	if i == 0 {
-		panic("no gRPC server configured")
-	}
-}
-
-const DefaultReconnectInterval = time.Minute
-
-// ReconnectTimeout returns the value of "reconnect_interval" gRPC config parameter.
-//
-// Returns DefaultReconnectInterval if the value is not defined or invalid.
-func ReconnectTimeout(c *config.Config) time.Duration {
-	grpcConf := c.Sub("grpc")
-	ri := config.DurationSafe(grpcConf, "reconnect_interval")
-	if ri > 0 {
-		return ri
-	}
-	return DefaultReconnectInterval
-}
diff --git a/cmd/frostfs-node/config/grpc/config_test.go b/cmd/frostfs-node/config/grpc/config_test.go
deleted file mode 100644
index 13ce4294e..000000000
--- a/cmd/frostfs-node/config/grpc/config_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package grpcconfig
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestGRPCSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		require.Panics(t, func() {
-			IterateEndpoints(configtest.EmptyConfig(), nil)
-		})
-	})
-
-	const path = "../../../../config/example/node"
-
-	fileConfigTest := func(c *config.Config) {
-		num := 0
-
-		IterateEndpoints(c, func(sc *Config) {
-			defer func() {
-				num++
-			}()
-
-			tls := sc.TLS()
-
-			switch num {
-			case 0:
-				require.Equal(t, "s01.frostfs.devenv:8080", sc.Endpoint())
-
-				require.NotNil(t, tls)
-				require.Equal(t, "/path/to/cert", tls.CertificateFile())
-				require.Equal(t, "/path/to/key", tls.KeyFile())
-				require.False(t, tls.UseInsecureCrypto())
-			case 1:
-				require.Equal(t, "s02.frostfs.devenv:8080", sc.Endpoint())
-				require.Nil(t, tls)
-			case 2:
-				require.Equal(t, "s03.frostfs.devenv:8080", sc.Endpoint())
-				require.NotNil(t, tls)
-				require.True(t, tls.UseInsecureCrypto())
-			}
-		})
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
deleted file mode 100644
index 20f373184..000000000
--- a/cmd/frostfs-node/config/logger/config.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package loggerconfig
-
-import (
-	"os"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore/loki"
-)
-
-const (
-	// LevelDefault is a default logger level.
-	LevelDefault              = "info"
-	DestinationDefault        = logger.DestinationStdout
-	subsection                = "logger"
-	lokiSubsection            = "loki"
-	AddressDefault            = "localhost:3100"
-	BatchEntriesNumberDefault = 100
-	BatchWaitDefault          = time.Second
-)
-
-// Level returns the value of "level" config parameter
-// from "logger" section.
-// -// Returns LevelDefault if the value is not a non-empty string. -func Level(c *config.Config) string { - v := config.StringSafe( - c.Sub(subsection), - "level", - ) - if v != "" { - return v - } - - return LevelDefault -} - -// Destination returns the value of "destination" config parameter -// from "logger" section. -// -// Returns DestinationDefault if the value is not a non-empty string. -func Destination(c *config.Config) string { - v := config.StringSafe( - c.Sub(subsection), - "destination", - ) - if v != "" { - return v - } - - return DestinationDefault -} - -// Timestamp returns the value of "timestamp" config parameter -// from "logger" section. -// -// Returns false if the value isn't specified. -func Timestamp(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "timestamp") -} - -// Tags returns the value of "tags" config parameter from "logger" section. -func Tags(c *config.Config) [][]string { - var res [][]string - sub := c.Sub(subsection).Sub("tags") - for i := 0; ; i++ { - s := sub.Sub(strconv.FormatInt(int64(i), 10)) - names := config.StringSafe(s, "names") - if names == "" { - break - } - res = append(res, []string{names, config.StringSafe(s, "level")}) - } - return res -} - -// ToLokiConfig extracts loki config. -func ToLokiConfig(c *config.Config) loki.Config { - hostname, _ := os.Hostname() - return loki.Config{ - Enabled: config.BoolSafe(c.Sub(subsection).Sub(lokiSubsection), "enabled"), - BatchWait: getBatchWait(c), - BatchEntriesNumber: getBatchEntriesNumber(c), - Endpoint: getEndpoint(c), - Labels: map[string]string{ - "hostname": hostname, - }, - } -} - -func getBatchWait(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_delay") - if v > 0 { - return v - } - - return BatchWaitDefault -} - -func getBatchEntriesNumber(c *config.Config) int { - v := config.IntSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_size") - if v > 0 { - return int(v) - } - - return BatchEntriesNumberDefault -} - -func getEndpoint(c *config.Config) string { - v := config.StringSafe(c.Sub(subsection).Sub(lokiSubsection), "endpoint") - if v != "" { - return v - } - - return AddressDefault -} diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go deleted file mode 100644 index 796ad529e..000000000 --- a/cmd/frostfs-node/config/logger/config_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package loggerconfig_test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestLoggerSection_Level(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig())) - require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig())) - require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig())) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, "debug", loggerconfig.Level(c)) - require.Equal(t, "journald", loggerconfig.Destination(c)) - require.Equal(t, true, loggerconfig.Timestamp(c)) - tags := loggerconfig.Tags(c) - require.Equal(t, "main, morph", tags[0][0]) - require.Equal(t, "debug", tags[0][1]) - } - - 
configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/metrics/config.go b/cmd/frostfs-node/config/metrics/config.go
deleted file mode 100644
index 1e934ec94..000000000
--- a/cmd/frostfs-node/config/metrics/config.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package metricsconfig
-
-import (
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
-	subsection = "prometheus"
-
-	// ShutdownTimeoutDefault is a default value for metrics HTTP service timeout.
-	ShutdownTimeoutDefault = 30 * time.Second
-
-	// AddressDefault is a default value for metrics HTTP service endpoint.
-	AddressDefault = "localhost:9090"
-)
-
-// Enabled returns the value of "enabled" config parameter
-// from "prometheus" section.
-//
-// Returns false if the value is missing or invalid.
-func Enabled(c *config.Config) bool {
-	return config.BoolSafe(c.Sub(subsection), "enabled")
-}
-
-// ShutdownTimeout returns the value of "shutdown_timeout" config parameter
-// from "prometheus" section.
-//
-// Returns ShutdownTimeoutDefault if the value is not positive duration.
-func ShutdownTimeout(c *config.Config) time.Duration {
-	v := config.DurationSafe(c.Sub(subsection), "shutdown_timeout")
-	if v > 0 {
-		return v
-	}
-
-	return ShutdownTimeoutDefault
-}
-
-// Address returns the value of "address" config parameter
-// from "prometheus" section.
-//
-// Returns AddressDefault if the value is not set.
-func Address(c *config.Config) string {
-	v := config.StringSafe(c.Sub(subsection), "address")
-	if v != "" {
-		return v
-	}
-
-	return AddressDefault
-}
diff --git a/cmd/frostfs-node/config/metrics/config_test.go b/cmd/frostfs-node/config/metrics/config_test.go
deleted file mode 100644
index c2a1b1fc4..000000000
--- a/cmd/frostfs-node/config/metrics/config_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package metricsconfig_test
-
-import (
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestMetricsSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		to := metricsconfig.ShutdownTimeout(configtest.EmptyConfig())
-		addr := metricsconfig.Address(configtest.EmptyConfig())
-
-		require.Equal(t, metricsconfig.ShutdownTimeoutDefault, to)
-		require.Equal(t, metricsconfig.AddressDefault, addr)
-		require.False(t, metricsconfig.Enabled(configtest.EmptyConfig()))
-	})
-
-	const path = "../../../../config/example/node"
-
-	fileConfigTest := func(c *config.Config) {
-		to := metricsconfig.ShutdownTimeout(c)
-		addr := metricsconfig.Address(c)
-
-		require.Equal(t, 15*time.Second, to)
-		require.Equal(t, "localhost:9090", addr)
-		require.True(t, metricsconfig.Enabled(c))
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
deleted file mode 100644
index a9f774d18..000000000
--- a/cmd/frostfs-node/config/morph/config.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package morphconfig
-
-import (
-	"errors"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-const (
-	subsection = "morph"
-
-	// DialTimeoutDefault is a default dial timeout of morph chain client connection.
-	DialTimeoutDefault = 5 * time.Second
-
-	// PriorityDefault is a default endpoint priority for the morph client.
-	PriorityDefault = 1
-
-	// CacheTTLDefault is a default value for cached values TTL.
-	// It is 0, because actual default depends on block time.
-	CacheTTLDefault = time.Duration(0)
-
-	// SwitchIntervalDefault is a default Neo RPCs switch interval.
-	SwitchIntervalDefault = 2 * time.Minute
-
-	// APEChainCacheSizeDefault is a default value of APE chain cache.
-	APEChainCacheSizeDefault = 10_000
-
-	// FrostfsIDCacheSizeDefault is a default value of FrostfsID cache.
-	FrostfsIDCacheSizeDefault = 10_000
-
-	// ContainerCacheSizeDefault represents the default size for the container cache.
-	ContainerCacheSizeDefault = 100
-
-	// PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
-	PollCandidatesTimeoutDefault = 20 * time.Second
-)
-
-var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
-
-// RPCEndpoint returns the list of values of "rpc_endpoint" config parameter
-// from "morph" section.
-//
-// Panics if the list is empty.
-func RPCEndpoint(c *config.Config) []client.Endpoint {
-	var es []client.Endpoint
-
-	sub := c.Sub(subsection).Sub("rpc_endpoint")
-	for i := 0; ; i++ {
-		s := sub.Sub(strconv.FormatInt(int64(i), 10))
-		addr := config.StringSafe(s, "address")
-		if addr == "" {
-			break
-		}
-
-		priority := int(config.IntSafe(s, "priority"))
-		if priority <= 0 {
-			priority = PriorityDefault
-		}
-
-		var mtlsConfig *client.MTLSConfig
-		rootCAs := config.StringSliceSafe(s, "trusted_ca_list")
-		if len(rootCAs) != 0 {
-			mtlsConfig = &client.MTLSConfig{
-				TrustedCAList: rootCAs,
-				KeyFile:       config.StringSafe(s, "key"),
-				CertFile:      config.StringSafe(s, "certificate"),
-			}
-		}
-
-		es = append(es, client.Endpoint{
-			Address:    addr,
-			Priority:   priority,
-			MTLSConfig: mtlsConfig,
-		})
-	}
-
-	if len(es) == 0 {
-		panic(errNoMorphEndpoints)
-	}
-	return es
-}
-
-// DialTimeout returns the value of "dial_timeout" config parameter
-// from "morph" section.
-//
-// Returns DialTimeoutDefault if the value is not positive duration.
-func DialTimeout(c *config.Config) time.Duration {
-	v := config.DurationSafe(c.Sub(subsection), "dial_timeout")
-	if v > 0 {
-		return v
-	}
-
-	return DialTimeoutDefault
-}
-
-// CacheTTL returns the value of "cache_ttl" config parameter
-// from "morph" section.
-//
-// Returns CacheTTLDefault if value is zero or invalid. Supports negative durations.
-func CacheTTL(c *config.Config) time.Duration {
-	res := config.DurationSafe(c.Sub(subsection), "cache_ttl")
-	if res != 0 {
-		return res
-	}
-
-	return CacheTTLDefault
-}
-
-// ContainerCacheSize returns the value of "container_cache_size" config parameter
-// from "morph" section.
-//
-// Returns 0 if the value is not positive integer.
-// Returns ContainerCacheSizeDefault if the value is missing.
-func ContainerCacheSize(c *config.Config) uint32 {
-	if c.Sub(subsection).Value("container_cache_size") == nil {
-		return ContainerCacheSizeDefault
-	}
-	return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
-}
-
-// SwitchInterval returns the value of "switch_interval" config parameter
-// from "morph" section.
-//
-// Returns SwitchIntervalDefault if value is not positive duration.
-func SwitchInterval(c *config.Config) time.Duration { - res := config.DurationSafe(c.Sub(subsection), "switch_interval") - if res != 0 { - return res - } - - return SwitchIntervalDefault -} - -// APEChainCacheSize returns the value of "ape_chain_cache_size" config parameter -// from "morph" section. -// -// Returns 0 if the value is not positive integer. -// Returns APEChainCacheSizeDefault if the value is missing. -func APEChainCacheSize(c *config.Config) uint32 { - if c.Sub(subsection).Value("ape_chain_cache_size") == nil { - return APEChainCacheSizeDefault - } - return config.Uint32Safe(c.Sub(subsection), "ape_chain_cache_size") -} - -// FrostfsIDCacheSize returns the value of "frostfsid_cache_size" config parameter -// from "morph" section. -// -// Returns 0 if the value is not positive integer. -// Returns FrostfsIDCacheSizeDefault if the value is missing. -func FrostfsIDCacheSize(c *config.Config) uint32 { - if c.Sub(subsection).Value("frostfsid_cache_size") == nil { - return FrostfsIDCacheSizeDefault - } - return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") -} - -// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter -// from "morph" section. -// -// Returns PollCandidatesTimeoutDefault if the value is not positive duration. -func NetmapCandidatesPollInterval(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection). - Sub("netmap").Sub("candidates"), "poll_interval") - if v > 0 { - return v - } - - return PollCandidatesTimeoutDefault -} diff --git a/cmd/frostfs-node/config/morph/config_test.go b/cmd/frostfs-node/config/morph/config_test.go deleted file mode 100644 index 5a021abc3..000000000 --- a/cmd/frostfs-node/config/morph/config_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package morphconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/stretchr/testify/require" -) - -func TestMorphSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Panics(t, func() { morphconfig.RPCEndpoint(empty) }) - require.Equal(t, morphconfig.DialTimeoutDefault, morphconfig.DialTimeout(empty)) - require.Equal(t, morphconfig.CacheTTLDefault, morphconfig.CacheTTL(empty)) - require.Equal(t, morphconfig.SwitchIntervalDefault, morphconfig.SwitchInterval(empty)) - require.Equal(t, uint32(morphconfig.APEChainCacheSizeDefault), morphconfig.APEChainCacheSize(empty)) - }) - - const path = "../../../../config/example/node" - - rpcs := []client.Endpoint{ - { - Address: "wss://rpc1.morph.frostfs.info:40341/ws", - Priority: 1, - MTLSConfig: &client.MTLSConfig{ - TrustedCAList: []string{ - "/path/to/ca.pem", - }, - KeyFile: "/path/to/key", - CertFile: "/path/to/cert", - }, - }, - { - Address: "wss://rpc2.morph.frostfs.info:40341/ws", - Priority: 2, - }, - } - - fileConfigTest := func(c *config.Config) { - require.Equal(t, rpcs, morphconfig.RPCEndpoint(c)) - require.Equal(t, 30*time.Second, morphconfig.DialTimeout(c)) - require.Equal(t, 15*time.Second, morphconfig.CacheTTL(c)) - require.Equal(t, 3*time.Minute, morphconfig.SwitchInterval(c)) - require.Equal(t, uint32(100000), morphconfig.APEChainCacheSize(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - 
- t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go deleted file mode 100644 index f598efc51..000000000 --- a/cmd/frostfs-node/config/multinet/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package multinet - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "multinet" - - FallbackDelayDefault = 300 * time.Millisecond -) - -// Enabled returns the value of "enabled" config parameter from "multinet" section. -func Enabled(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "enabled") -} - -type Subnet struct { - Mask string - SourceIPs []string -} - -// Subnets returns the value of "subnets" config parameter from "multinet" section. -func Subnets(c *config.Config) []Subnet { - var result []Subnet - sub := c.Sub(subsection).Sub("subnets") - for i := 0; ; i++ { - s := sub.Sub(strconv.FormatInt(int64(i), 10)) - mask := config.StringSafe(s, "mask") - if mask == "" { - break - } - sourceIPs := config.StringSliceSafe(s, "source_ips") - result = append(result, Subnet{ - Mask: mask, - SourceIPs: sourceIPs, - }) - } - return result -} - -// Balancer returns the value of "balancer" config parameter from "multinet" section. -func Balancer(c *config.Config) string { - return config.StringSafe(c.Sub(subsection), "balancer") -} - -// Restrict returns the value of "restrict" config parameter from "multinet" section. -func Restrict(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "restrict") -} - -// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section. -func FallbackDelay(c *config.Config) time.Duration { - fd := config.DurationSafe(c.Sub(subsection), "fallback_delay") - if fd != 0 { // negative value means no fallback - return fd - } - return FallbackDelayDefault -} diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go deleted file mode 100644 index 5f7dc6d53..000000000 --- a/cmd/frostfs-node/config/multinet/config_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package multinet - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestMultinetSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - require.Equal(t, false, Enabled(empty)) - require.Equal(t, ([]Subnet)(nil), Subnets(empty)) - require.Equal(t, "", Balancer(empty)) - require.Equal(t, false, Restrict(empty)) - require.Equal(t, FallbackDelayDefault, FallbackDelay(empty)) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, true, Enabled(c)) - require.Equal(t, []Subnet{ - { - Mask: "192.168.219.174/24", - SourceIPs: []string{ - "192.168.218.185", - "192.168.219.185", - }, - }, - { - Mask: "10.78.70.74/24", - SourceIPs: []string{ - "10.78.70.185", - "10.78.71.185", - }, - }, - }, Subnets(c)) - require.Equal(t, "roundrobin", Balancer(c)) - require.Equal(t, false, Restrict(c)) - require.Equal(t, 350*time.Millisecond, FallbackDelay(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git 
a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
deleted file mode 100644
index c50718c5f..000000000
--- a/cmd/frostfs-node/config/node/config.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package nodeconfig
-
-import (
-	"fmt"
-	"io/fs"
-	"iter"
-	"os"
-	"slices"
-	"strconv"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// PersistentSessionsConfig is a wrapper over "persistent_sessions" config section
-// which provides access to persistent session tokens storage configuration of node.
-type PersistentSessionsConfig struct {
-	cfg *config.Config
-}
-
-// PersistentStateConfig is a wrapper over "persistent_state" config section
-// which provides access to persistent state storage configuration of node.
-type PersistentStateConfig struct {
-	cfg *config.Config
-}
-
-// PersistentPolicyRulesConfig is a wrapper over "persistent_policy_rules" config section
-// which provides access to persistent policy rules storage configuration of node.
-type PersistentPolicyRulesConfig struct {
-	cfg *config.Config
-}
-
-const (
-	subsection                      = "node"
-	persistentSessionsSubsection    = "persistent_sessions"
-	persistentStateSubsection       = "persistent_state"
-	persistentPolicyRulesSubsection = "persistent_policy_rules"
-
-	attributePrefix = "attribute"
-
-	// PersistentStatePathDefault is a default path for persistent state file.
-	PersistentStatePathDefault = ".frostfs-storage-state"
-)
-
-// Key returns the value of "key" config parameter
-// from "node" section.
-//
-// If the value is not set, falls back to the Wallet section.
-//
-// Panics if the value is not a correct filename of a binary-encoded private key.
-func Key(c *config.Config) *keys.PrivateKey {
-	v := config.StringSafe(c.Sub(subsection), "key")
-	if v == "" {
-		return Wallet(c)
-	}
-
-	var (
-		key  *keys.PrivateKey
-		err  error
-		data []byte
-	)
-	if data, err = os.ReadFile(v); err == nil {
-		key, err = keys.NewPrivateKeyFromBytes(data)
-	}
-
-	if err != nil {
-		panic(fmt.Errorf("invalid private key in node section: %w", err))
-	}
-
-	return key
-}
-
-// Wallet returns the value of a node private key from "node" section.
-//
-// Panics if section contains invalid values.
-func Wallet(c *config.Config) *keys.PrivateKey {
-	v := c.Sub(subsection).Sub("wallet")
-	acc, err := utilConfig.LoadAccount(
-		config.String(v, "path"),
-		config.String(v, "address"),
-		config.String(v, "password"))
-	if err != nil {
-		panic(fmt.Errorf("invalid wallet config: %w", err))
-	}
-
-	return acc.PrivateKey()
-}
-
-type stringAddressGroup []string
-
-func (x stringAddressGroup) Addresses() iter.Seq[string] {
-	return slices.Values(x)
-}
-
-func (x stringAddressGroup) NumberOfAddresses() int {
-	return len(x)
-}
-
-// BootstrapAddresses returns the value of "addresses" config parameter
-// from "node" section as network.AddressGroup.
-//
-// Panics if the value is not a string list of valid NeoFS network addresses.
-func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) {
-	v := config.StringSlice(c.Sub(subsection), "addresses")
-
-	err := addr.FromIterator(stringAddressGroup(v))
-	if err != nil {
-		panic(fmt.Errorf("could not parse bootstrap addresses: %w", err))
-	}
-
-	return addr
-}
-
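Key above implements a two-step lookup: a raw binary key file when "key" is set, otherwise the wallet path. A sketch of the same fallback using neo-go's keys package, with the wallet loader abstracted behind a callback (loadKey and loadWallet are illustrative names):

```go
package main

import (
	"fmt"
	"os"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// loadKey mirrors the fallback in nodeconfig.Key: try the raw binary key
// file first, and defer to the wallet-based loader when no path is given.
func loadKey(path string, loadWallet func() (*keys.PrivateKey, error)) (*keys.PrivateKey, error) {
	if path == "" {
		return loadWallet()
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read key file: %w", err)
	}
	return keys.NewPrivateKeyFromBytes(data)
}

func main() {
	k, err := loadKey("", func() (*keys.PrivateKey, error) {
		// Generate a throwaway key instead of reading a real wallet.
		return keys.NewPrivateKey()
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(k.Address())
}
```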
-// Attributes returns the list of config parameters
-// from "node" section that are set in "attribute_i" format,
-// where i is in the range [0,100).
-func Attributes(c *config.Config) (attrs []string) {
-	const maxAttributes = 100
-
-	for i := range maxAttributes {
-		attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
-		if attr == "" {
-			return
-		}
-
-		attrs = append(attrs, attr)
-	}
-
-	return
-}
-
-// PersistentSessions returns a structure that provides access to "persistent_sessions"
-// subsection of "node" section.
-func PersistentSessions(c *config.Config) PersistentSessionsConfig {
-	return PersistentSessionsConfig{
-		c.Sub(subsection).Sub(persistentSessionsSubsection),
-	}
-}
-
-// Path returns the value of "path" config parameter.
-func (p PersistentSessionsConfig) Path() string {
-	return config.String(p.cfg, "path")
-}
-
-// PersistentState returns a structure that provides access to "persistent_state"
-// subsection of "node" section.
-func PersistentState(c *config.Config) PersistentStateConfig {
-	return PersistentStateConfig{
-		c.Sub(subsection).Sub(persistentStateSubsection),
-	}
-}
-
-// Path returns the value of "path" config parameter.
-//
-// Returns PersistentStatePathDefault if the value is not a non-empty string.
-func (p PersistentStateConfig) Path() string {
-	v := config.String(p.cfg, "path")
-	if v != "" {
-		return v
-	}
-
-	return PersistentStatePathDefault
-}
-
-const (
-	// PermDefault is a default permission bits for local override storage file.
-	PermDefault = 0o644
-)
-
-// PersistentPolicyRules returns a structure that provides access to "persistent_policy_rules"
-// subsection of "node" section.
-func PersistentPolicyRules(c *config.Config) PersistentPolicyRulesConfig {
-	return PersistentPolicyRulesConfig{
-		c.Sub(subsection).Sub(persistentPolicyRulesSubsection),
-	}
}
-
-// Path returns the value of "path" config parameter.
-//
-// Returns empty string if missing, for compatibility with older configurations.
-func (l PersistentPolicyRulesConfig) Path() string {
-	return config.StringSafe(l.cfg, "path")
-}
-
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
-//
-// Returns PermDefault if the value is not a positive number.
-func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
-	p := config.UintSafe(l.cfg, "perm")
-	if p == 0 {
-		p = PermDefault
-	}
-
-	return fs.FileMode(p)
-}
-
-// NoSync returns the value of "no_sync" config parameter as a bool value.
-//
-// Returns false if the value is not a boolean.
-func (l PersistentPolicyRulesConfig) NoSync() bool {
-	return config.BoolSafe(l.cfg, "no_sync")
-}
-
-// CompatibilityMode returns true if the node should run in compatibility mode
-// with previous versions.
-func CompatibilityMode(c *config.Config) bool {
-	return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
-}
-
-// LocodeDBPath returns the path to the LOCODE database.
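The Attributes loop above reads consecutive attribute_0, attribute_1, ... keys, caps the scan at 100, and stops at the first gap, so a hole in the numbering silently truncates the list. A stdlib sketch of the same scan (the sample values come from the example config used by the tests):

```go
package main

import (
	"fmt"
	"strconv"
)

// attributes mirrors nodeconfig.Attributes: consecutive attribute_i keys,
// capped at 100, stopping at the first empty slot.
func attributes(kv map[string]string) []string {
	var attrs []string
	for i := range 100 {
		attr := kv["attribute_"+strconv.Itoa(i)]
		if attr == "" {
			break
		}
		attrs = append(attrs, attr)
	}
	return attrs
}

func main() {
	kv := map[string]string{
		"attribute_0": "Price:11",
		"attribute_1": "UN-LOCODE:RU MSK",
		"attribute_3": "unreachable: attribute_2 is missing",
	}
	fmt.Println(attributes(kv)) // [Price:11 UN-LOCODE:RU MSK]
}
```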
-func LocodeDBPath(c *config.Config) string { - return config.String(c.Sub(subsection), "locode_db_path") -} diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go deleted file mode 100644 index 9af1dc038..000000000 --- a/cmd/frostfs-node/config/node/config_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package nodeconfig - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/stretchr/testify/require" -) - -func TestNodeSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Panics( - t, - func() { - Key(empty) - }, - ) - - require.Panics( - t, - func() { - BootstrapAddresses(empty) - }, - ) - - attribute := Attributes(empty) - persisessionsPath := PersistentSessions(empty).Path() - persistatePath := PersistentState(empty).Path() - - require.Empty(t, attribute) - require.Equal(t, "", persisessionsPath) - require.Equal(t, PersistentStatePathDefault, persistatePath) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - key := Key(c) - addrs := BootstrapAddresses(c) - attributes := Attributes(c) - wKey := Wallet(c) - persisessionsPath := PersistentSessions(c).Path() - persistatePath := PersistentState(c).Path() - - expectedAddr := []struct { - str string - host string - }{ - { - str: "/dns4/localhost/tcp/8083/tls", - host: "grpcs://localhost:8083", - }, - { - str: "/dns4/s01.frostfs.devenv/tcp/8080", - host: "s01.frostfs.devenv:8080", - }, - { - str: "/dns4/s02.frostfs.devenv/tcp/8081", - host: "s02.frostfs.devenv:8081", - }, - { - str: "/ip4/127.0.0.1/tcp/8082", - host: "127.0.0.1:8082", - }, - } - - require.Equal(t, "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM", key.Address()) - - require.EqualValues(t, len(expectedAddr), addrs.Len()) - - ind := 0 - - addrs.IterateAddresses(func(addr network.Address) bool { - require.Equal(t, expectedAddr[ind].str, addr.String()) - require.Equal(t, expectedAddr[ind].host, addr.URIAddr()) - - ind++ - - return false - }) - - require.Len(t, attributes, 2) - require.Equal(t, "Price:11", attributes[0]) - require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) - - require.NotNil(t, wKey) - require.Equal(t, - config.StringSafe(c.Sub("node").Sub("wallet"), "address"), - address.Uint160ToString(wKey.GetScriptHash())) - - require.Equal(t, "/sessions", persisessionsPath) - require.Equal(t, "/state", persistatePath) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/node/wallet.json b/cmd/frostfs-node/config/node/wallet.json deleted file mode 100644 index 006d96023..000000000 --- a/cmd/frostfs-node/config/node/wallet.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": "3.0", - "accounts": [ - { - "address": "NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz", - "key": "6PYXFRFUfoMNjWd2UmaaEjwHSWpifcLLTbEfhkwXdiSZ2n2WLfr75JpxmJ", - "label": "testacc", - "contract": { - "script": "DCECaeaVhKFa+ENNUpRJLz6BRmRbkIaoN+xZt3VHzlzkHJZBVuezJw==", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "isDefault": false - } - ], - "scrypt": { - "n": 16384, - "r": 8, - "p": 8 - }, - "extra": { - "Tokens": 
null
-  }
-}
diff --git a/cmd/frostfs-node/config/node/wallet.key b/cmd/frostfs-node/config/node/wallet.key
deleted file mode 100644
index ba2021bd5..000000000
--- a/cmd/frostfs-node/config/node/wallet.key
+++ /dev/null
@@ -1 +0,0 @@
-?ܳv/l
\ No newline at end of file
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
deleted file mode 100644
index c8c967d30..000000000
--- a/cmd/frostfs-node/config/object/config.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package objectconfig
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-// PutConfig is a wrapper over "put" config section which provides access
-// to object put pipeline configuration of object service.
-type PutConfig struct {
-	cfg *config.Config
-}
-
-// GetConfig is a wrapper over "get" config section which provides access
-// to object get pipeline configuration of object service.
-type GetConfig struct {
-	cfg *config.Config
-}
-
-const (
-	subsection = "object"
-
-	putSubsection = "put"
-	getSubsection = "get"
-)
-
-// Put returns a structure that provides access to "put" subsection of
-// "object" section.
-func Put(c *config.Config) PutConfig {
-	return PutConfig{
-		c.Sub(subsection).Sub(putSubsection),
-	}
-}
-
-// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter, or `false` if it is not defined.
-func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
-	return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
-}
-
-// Get returns a structure that provides access to "get" subsection of
-// "object" section.
-func Get(c *config.Config) GetConfig {
-	return GetConfig{
-		c.Sub(subsection).Sub(getSubsection),
-	}
-}
-
-// Priority returns the value of "priority" config parameter.
-func (g GetConfig) Priority() []string { - return config.StringSliceSafe(g.cfg, "priority") -} diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go deleted file mode 100644 index 1c525ef55..000000000 --- a/cmd/frostfs-node/config/object/config_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package objectconfig_test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestObjectSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) - require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) - require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/object/delete.go b/cmd/frostfs-node/config/object/delete.go deleted file mode 100644 index 3a4abe195..000000000 --- a/cmd/frostfs-node/config/object/delete.go +++ /dev/null @@ -1,19 +0,0 @@ -package objectconfig - -import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - -const ( - deleteSubsection = "delete" - - // DefaultTombstoneLifetime is the default value of tombstone lifetime in epochs. - DefaultTombstoneLifetime = 5 -) - -// TombstoneLifetime returns the value of `tombstone_lifetime` config parameter. -func TombstoneLifetime(c *config.Config) uint64 { - ts := config.UintSafe(c.Sub(subsection).Sub(deleteSubsection), "tombstone_lifetime") - if ts <= 0 { - return DefaultTombstoneLifetime - } - return ts -} diff --git a/cmd/frostfs-node/config/policer/config.go b/cmd/frostfs-node/config/policer/config.go deleted file mode 100644 index 487e42be8..000000000 --- a/cmd/frostfs-node/config/policer/config.go +++ /dev/null @@ -1,33 +0,0 @@ -package policerconfig - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "policer" - - // HeadTimeoutDefault is a default object.Head request timeout in policer. - HeadTimeoutDefault = 5 * time.Second -) - -// HeadTimeout returns the value of "head_timeout" config parameter -// from "policer" section. -// -// Returns HeadTimeoutDefault if the value is not positive duration. -func HeadTimeout(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection), "head_timeout") - if v > 0 { - return v - } - - return HeadTimeoutDefault -} - -// UnsafeDisable returns the value of "unsafe_disable" config parameter -// from "policer" section. 
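HeadTimeout above is consumed as a per-call deadline: each object.Head probe the policer issues should be bounded by it. A sketch of that usage with context.WithTimeout (headWithTimeout and the head callback are illustrative, not names from the repo):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// headWithTimeout bounds a single Head-style call with the configured
// timeout, the way a policer loop would apply HeadTimeout.
func headWithTimeout(ctx context.Context, timeout time.Duration,
	head func(context.Context) error,
) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return head(ctx)
}

func main() {
	slowHead := func(ctx context.Context) error {
		select {
		case <-time.After(time.Second):
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	err := headWithTimeout(context.Background(), 50*time.Millisecond, slowHead)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```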
-func UnsafeDisable(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "unsafe_disable") -} diff --git a/cmd/frostfs-node/config/policer/config_test.go b/cmd/frostfs-node/config/policer/config_test.go deleted file mode 100644 index 95f0c3af2..000000000 --- a/cmd/frostfs-node/config/policer/config_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package policerconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestPolicerSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Equal(t, policerconfig.HeadTimeoutDefault, policerconfig.HeadTimeout(empty)) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, 15*time.Second, policerconfig.HeadTimeout(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go deleted file mode 100644 index 6c3e8adab..000000000 --- a/cmd/frostfs-node/config/profiler/config.go +++ /dev/null @@ -1,77 +0,0 @@ -package profilerconfig - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "pprof" - - // ShutdownTimeoutDefault is a default value for profiler HTTP service timeout. - ShutdownTimeoutDefault = 30 * time.Second - - // AddressDefault is a default value for profiler HTTP service endpoint. - AddressDefault = "localhost:6060" -) - -// Enabled returns the value of "enabled" config parameter -// from "pprof" section. -// -// Returns false if the value is missing or invalid. -func Enabled(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "enabled") -} - -// ShutdownTimeout returns the value of "shutdown_timeout" config parameter -// from "pprof" section. -// -// Returns ShutdownTimeoutDefault if the value is not positive duration. -func ShutdownTimeout(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection), "shutdown_timeout") - if v > 0 { - return v - } - - return ShutdownTimeoutDefault -} - -// Address returns the value of "address" config parameter -// from "pprof" section. -// -// Returns AddressDefault if the value is not set. -func Address(c *config.Config) string { - s := c.Sub(subsection) - - v := config.StringSafe(s, "address") - if v != "" { - return v - } - - return AddressDefault -} - -// BlockRate returns the value of "block_rate" config parameter -// from "pprof" section. -func BlockRate(c *config.Config) int { - s := c.Sub(subsection) - - v := int(config.IntSafe(s, "block_rate")) - if v <= 0 { - return 0 - } - return v -} - -// MutexRate returns the value of "mutex_rate" config parameter -// from "pprof" section. 
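BlockRate and MutexRate map directly onto the Go runtime's profiling knobs, where zero disables the corresponding profile, which matches what the accessors above return for missing or non-positive values. A sketch of the wiring using the real stdlib calls:

```go
package main

import (
	"fmt"
	"runtime"
)

// applyProfilerRates is how BlockRate and MutexRate would typically be
// consumed: zero disables the corresponding profile entirely.
func applyProfilerRates(blockRate, mutexRate int) {
	// Sample roughly one blocking event per blockRate nanoseconds blocked.
	runtime.SetBlockProfileRate(blockRate)
	// Report on average 1 in mutexRate mutex contention events.
	prev := runtime.SetMutexProfileFraction(mutexRate)
	_ = prev // previous fraction, unused here
}

func main() {
	applyProfilerRates(10_000, 10_000) // values from the example config
	fmt.Println("block and mutex profilers armed")
}
```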
-func MutexRate(c *config.Config) int { - s := c.Sub(subsection) - - v := int(config.IntSafe(s, "mutex_rate")) - if v <= 0 { - return 0 - } - return v -} diff --git a/cmd/frostfs-node/config/profiler/config_test.go b/cmd/frostfs-node/config/profiler/config_test.go deleted file mode 100644 index 2f1cb1788..000000000 --- a/cmd/frostfs-node/config/profiler/config_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package profilerconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestProfilerSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - to := profilerconfig.ShutdownTimeout(configtest.EmptyConfig()) - addr := profilerconfig.Address(configtest.EmptyConfig()) - - require.Equal(t, profilerconfig.ShutdownTimeoutDefault, to) - require.Equal(t, profilerconfig.AddressDefault, addr) - require.False(t, profilerconfig.Enabled(configtest.EmptyConfig())) - - require.Zero(t, profilerconfig.BlockRate(configtest.EmptyConfig())) - require.Zero(t, profilerconfig.MutexRate(configtest.EmptyConfig())) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - to := profilerconfig.ShutdownTimeout(c) - addr := profilerconfig.Address(c) - - require.Equal(t, 15*time.Second, to) - require.Equal(t, "localhost:6060", addr) - require.True(t, profilerconfig.Enabled(c)) - - require.Equal(t, 10_000, profilerconfig.BlockRate(c)) - require.Equal(t, 10_000, profilerconfig.MutexRate(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go deleted file mode 100644 index 85f8180ed..000000000 --- a/cmd/frostfs-node/config/qos/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package qos - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -const ( - subsection = "qos" - criticalSubSection = "critical" - internalSubSection = "internal" -) - -// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config -// parameter from "qos" section. -// -// Returns an empty list if not set. -func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { - return authorizedKeys(c, criticalSubSection) -} - -// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config -// parameter from "qos" section. -// -// Returns an empty list if not set. 
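// Sketch (assuming c is a *config.Config): both getters delegate to
// authorizedKeys below, and a malformed hex key panics at startup.
//
//	critical := qos.CriticalAuthorizedKeys(c) // keys.PublicKeys, empty when unset
//	internal := qos.InternalAuthorizedKeys(c)
//	_, _ = critical, internal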
-func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys { - return authorizedKeys(c, internalSubSection) -} - -func authorizedKeys(c *config.Config, sub string) keys.PublicKeys { - strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys") - pubs := make(keys.PublicKeys, 0, len(strKeys)) - - for i := range strKeys { - pub, err := keys.NewPublicKeyFromString(strKeys[i]) - if err != nil { - panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err)) - } - - pubs = append(pubs, pub) - } - - return pubs -} diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go deleted file mode 100644 index b3b6019cc..000000000 --- a/cmd/frostfs-node/config/qos/config_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package qos - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestQoSSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Empty(t, CriticalAuthorizedKeys(empty)) - require.Empty(t, InternalAuthorizedKeys(empty)) - }) - - const path = "../../../../config/example/node" - - criticalPubs := make(keys.PublicKeys, 2) - criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11") - criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6") - - internalPubs := make(keys.PublicKeys, 2) - internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2") - internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a") - - fileConfigTest := func(c *config.Config) { - require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c)) - require.Equal(t, internalPubs, InternalAuthorizedKeys(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go deleted file mode 100644 index e954bf19d..000000000 --- a/cmd/frostfs-node/config/replicator/config.go +++ /dev/null @@ -1,42 +0,0 @@ -package replicatorconfig - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "replicator" - - // PutTimeoutDefault is a default timeout of object put request in replicator. - PutTimeoutDefault = 5 * time.Second - // PoolSizeDefault is a default pool size for put request in replicator. - PoolSizeDefault = 10 -) - -// PutTimeout returns the value of "put_timeout" config parameter -// from "replicator" section. -// -// Returns PutTimeoutDefault if the value is not positive duration. -func PutTimeout(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection), "put_timeout") - if v > 0 { - return v - } - - return PutTimeoutDefault -} - -// PoolSize returns the value of "pool_size" config parameter -// from "replicator" section. -// -// Returns PoolSizeDefault if the value is non-positive integer. 
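// Usage sketch (c assumed to be a *config.Config):
//
//	putTimeout := replicatorconfig.PutTimeout(c) // PutTimeoutDefault (5s) when unset
//	poolSize := replicatorconfig.PoolSize(c)     // PoolSizeDefault (10) when unset
//	_, _ = putTimeout, poolSize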
-func PoolSize(c *config.Config) int { - v := int(config.IntSafe(c.Sub(subsection), "pool_size")) - if v > 0 { - return v - } - - return PoolSizeDefault -} diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go deleted file mode 100644 index 2aa490946..000000000 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package replicatorconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestReplicatorSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, 15*time.Second, replicatorconfig.PutTimeout(c)) - require.Equal(t, 10, replicatorconfig.PoolSize(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go deleted file mode 100644 index e0efdfde2..000000000 --- a/cmd/frostfs-node/config/rpc/config.go +++ /dev/null @@ -1,42 +0,0 @@ -package rpcconfig - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "rpc" - limitsSubsection = "limits" -) - -type LimitConfig struct { - Methods []string - MaxOps int64 -} - -// Limits returns the "limits" config from "rpc" section. 
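// The expected section shape, mirroring the testdata further below; "max_ops"
// is mandatory for every method group, and its absence makes Limits panic:
//
//	rpc:
//	  limits:
//	    - methods:
//	        - /neo.fs.v2.object.ObjectService/Put
//	      max_ops: 1000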
-func Limits(c *config.Config) []LimitConfig { - c = c.Sub(subsection).Sub(limitsSubsection) - - var limits []LimitConfig - - for i := uint64(0); ; i++ { - si := strconv.FormatUint(i, 10) - sc := c.Sub(si) - - methods := config.StringSliceSafe(sc, "methods") - if len(methods) == 0 { - break - } - - if sc.Value("max_ops") == nil { - panic("no max operations for method group") - } - - limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) - } - - return limits -} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go deleted file mode 100644 index a6365e19f..000000000 --- a/cmd/frostfs-node/config/rpc/config_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package rpcconfig - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestRPCSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - require.Empty(t, Limits(configtest.EmptyConfig())) - }) - - t.Run("correct config", func(t *testing.T) { - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - limits := Limits(c) - require.Len(t, limits, 2) - - limit0 := limits[0] - limit1 := limits[1] - - require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) - require.Equal(t, limit0.MaxOps, int64(1000)) - - require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) - require.Equal(t, limit1.MaxOps, int64(10000)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) - - t.Run("no max operations", func(t *testing.T) { - const path = "testdata/no_max_ops" - - fileConfigTest := func(c *config.Config) { - require.Panics(t, func() { _ = Limits(c) }) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) - - t.Run("zero max operations", func(t *testing.T) { - const path = "testdata/zero_max_ops" - - fileConfigTest := func(c *config.Config) { - limits := Limits(c) - require.Len(t, limits, 2) - - limit0 := limits[0] - limit1 := limits[1] - - require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) - require.Equal(t, limit0.MaxOps, int64(0)) - - require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) - require.Equal(t, limit1.MaxOps, int64(10000)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) -} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env deleted file mode 100644 index 2fed4c5bc..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env +++ /dev/null @@ -1,3 +0,0 @@ -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json deleted file mode 100644 index 6156aa71d..000000000 --- 
a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ] - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - } -} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml deleted file mode 100644 index e50b7ae93..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml +++ /dev/null @@ -1,8 +0,0 @@ -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env deleted file mode 100644 index ce7302b0b..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env +++ /dev/null @@ -1,4 +0,0 @@ -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_0_MAX_OPS=0 -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json deleted file mode 100644 index 16a1c173f..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ], - "max_ops": 0 - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - } -} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml deleted file mode 100644 index 525d768d4..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml +++ /dev/null @@ -1,9 +0,0 @@ -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - max_ops: 0 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 diff --git a/cmd/frostfs-node/config/runtime/config.go b/cmd/frostfs-node/config/runtime/config.go deleted file mode 100644 index ad6cce43b..000000000 --- a/cmd/frostfs-node/config/runtime/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package runtime - -import ( - "math" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "runtime" - memoryLimitDefault = math.MaxInt64 -) - -// GCMemoryLimitBytes returns the value of "soft_memory_limit" config parameter from "runtime" section. 
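// Sketch of the assumed call site (not shown in this file): the returned
// value fits debug.SetMemoryLimit from runtime/debug, with math.MaxInt64
// meaning "no limit".
//
//	debug.SetMemoryLimit(GCMemoryLimitBytes(c))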
-func GCMemoryLimitBytes(c *config.Config) int64 { - l := config.SizeInBytesSafe(c.Sub(subsection), "soft_memory_limit") - - if l > 0 { - return int64(l) - } - - return memoryLimitDefault -} diff --git a/cmd/frostfs-node/config/runtime/config_test.go b/cmd/frostfs-node/config/runtime/config_test.go deleted file mode 100644 index 1bfa42ad8..000000000 --- a/cmd/frostfs-node/config/runtime/config_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package runtime - -import ( - "math" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestGCMemoryLimit(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Equal(t, int64(math.MaxInt64), GCMemoryLimitBytes(empty)) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - require.Equal(t, int64(1073741824), GCMemoryLimitBytes(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go deleted file mode 100644 index e98c032f0..000000000 --- a/cmd/frostfs-node/config/test/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package configtest - -import ( - "bufio" - "os" - "strings" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "github.com/stretchr/testify/require" -) - -func fromFile(path string) *config.Config { - return config.New(path, "", "") -} - -func fromEnvFile(t testing.TB, path string) *config.Config { - loadEnv(t, path) // github.com/joho/godotenv can do that as well - - return config.New("", "", config.EnvPrefix) -} - -func forEachFile(paths []string, f func(*config.Config)) { - for i := range paths { - f(fromFile(paths[i])) - } -} - -// ForEachFileType passes configs read from next files: -// - `.yaml`; -// - `.json`. -func ForEachFileType(pref string, f func(*config.Config)) { - forEachFile([]string{ - pref + ".yaml", - pref + ".json", - }, f) -} - -// ForEnvFileType creates config from `.env` file. -func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) { - f(fromEnvFile(t, pref+".env")) -} - -// EmptyConfig returns config without any values and sections. -func EmptyConfig() *config.Config { - return config.New("", "", config.EnvPrefix) -} - -// loadEnv reads .env file, parses `X=Y` records and sets OS ENVs. 
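// Typical use in a section test, as the config tests above demonstrate
// ("testdata/config" is an illustrative path):
//
//	configtest.ForEachFileType("testdata/config", func(c *config.Config) {
//		// assert section values read from c
//	})
//	configtest.ForEnvFileType(t, "testdata/config", func(c *config.Config) { /* same assertions */ })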
-func loadEnv(t testing.TB, path string) { - f, err := os.Open(path) - require.NoError(t, err, "can't open .env file") - - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - k, v, found := strings.Cut(scanner.Text(), "=") - if !found { - continue - } - - v = strings.Trim(v, `"`) - - t.Setenv(k, v) - } -} diff --git a/cmd/frostfs-node/config/test/config.json b/cmd/frostfs-node/config/test/config.json deleted file mode 100644 index 7eec1b73e..000000000 --- a/cmd/frostfs-node/config/test/config.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "value": "some value", - - "section": { - "any": "thing", - "sub": { - "sub": { - "sub1": { - "key": "val1" - }, - "sub2": { - "key": "val2" - } - } - } - }, - - "string_slice": { - "empty": [], - "filled": [ - "string1", - "string2" - ], - "incorrect": null - }, - - "string": { - "correct": "some string", - "incorrect": [] - }, - - "duration": { - "correct": "15m", - "incorrect": "some string" - }, - - "number": { - "int_pos": 1, - "int_neg": -1, - "fract_pos": 2.5, - "fract_neg": -2.5, - "incorrect": "some string" - }, - - "bool": { - "correct": true, - "correct_string": "true", - "incorrect": "not true" - }, - - "sizes": { - "size_b": "1b", - "size_k": "1 k", - "size_kb": "1 kb", - "size_kb_no_space": "2kb", - "size_m": "12m", - "size_mb": "12mb", - "size_g": "4g", - "size_gb": "4gb", - "size_t": "5 T", - "size_tb": "5 TB", - "size_float": ".5t", - "size_float_big": "14.123 gb", - "size_i_am_not_very_clever": "12.12345678", - "size_bytes": "2048b", - "size_bytes_no_suffix": 123456 - }, - - "with_default": { - "default": { - "sub": { - "missing": 123, - "overridden": "x" - }, - "missing": 42, - "overridden": "a", - "overridden_with_default": true - }, - "custom": { - "sub": { - "overridden": "y" - }, - "overridden": "b", - "overridden_with_default": false - } - } -} diff --git a/cmd/frostfs-node/config/test/config.yaml b/cmd/frostfs-node/config/test/config.yaml deleted file mode 100644 index 3ae459ea2..000000000 --- a/cmd/frostfs-node/config/test/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -value: some value - -section: - any: thing - - sub: - sub: - sub1: - key: val1 - - sub2: - key: val2 - -string_slice: - empty: [] - - filled: - - string1 - - string2 - - incorrect: - -string: - correct: some string - - incorrect: [] - -duration: - correct: 15m - incorrect: some string - -number: - int_pos: 1 - int_neg: -1 - fract_pos: 2.5 - fract_neg: -2.5 - incorrect: some string - -bool: - correct: true - correct_string: "true" - incorrect: not true - -sizes: - size_b: 1b - size_k: 1 k - size_kb: 1 kb - size_kb_no_space: 2kb - size_m: 12m - size_mb: 12mb - size_g: 4g - size_gb: 4gb - size_t: 5 T - size_tb: 5 TB - size_float: .5t - size_float_big: 14.123 gb - size_i_am_not_very_clever: 12.12345678 - size_bytes: 2048b - size_bytes_no_suffix: 123456 - -with_default: - default: - sub: - missing: 123 - overridden: "x" - missing: 42 - overridden: "a" - overridden_with_default: true - custom: - sub: - overridden: "y" - overridden: "b" - overridden_with_default: false diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go deleted file mode 100644 index 91ef669ee..000000000 --- a/cmd/frostfs-node/config/tracing/config.go +++ /dev/null @@ -1,71 +0,0 @@ -package tracing - -import ( - "crypto/x509" - "errors" - "fmt" - "os" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" -) - -const ( - subsection = "tracing" -) - -// ToTracingConfig extracts tracing config. -func ToTracingConfig(c *config.Config) (*tracing.Config, error) { - conf := &tracing.Config{ - Enabled: config.BoolSafe(c.Sub(subsection), "enabled"), - Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")), - Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"), - Service: "frostfs-node", - InstanceID: getInstanceIDOrDefault(c), - Version: misc.Version, - Attributes: make(map[string]string), - } - - if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" { - caBytes, err := os.ReadFile(trustedCa) - if err != nil { - return nil, fmt.Errorf("cannot read trusted ca cert by path: %w", err) - } - certPool := x509.NewCertPool() - ok := certPool.AppendCertsFromPEM(caBytes) - if !ok { - return nil, errors.New("can't fill cert pool by ca cert") - } - conf.ServerCaCertPool = certPool - } - - i := uint64(0) - for ; ; i++ { - si := strconv.FormatUint(i, 10) - ac := c.Sub(subsection).Sub("attributes").Sub(si) - k := config.StringSafe(ac, "key") - if k == "" { - break - } - v := config.StringSafe(ac, "value") - if v == "" { - return nil, fmt.Errorf("empty tracing attribute value for key %s", k) - } - if _, ok := conf.Attributes[k]; ok { - return nil, fmt.Errorf("tracing attribute key %s defined more than once", k) - } - conf.Attributes[k] = v - } - - return conf, nil -} - -func getInstanceIDOrDefault(c *config.Config) string { - s := config.StringSliceSafe(c.Sub("node"), "addresses") - if len(s) > 0 { - return s[0] - } - return "" -} diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go deleted file mode 100644 index 8e485ca6e..000000000 --- a/cmd/frostfs-node/config/tracing/config_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package tracing - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "github.com/stretchr/testify/require" -) - -func TestTracingSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - tc, err := ToTracingConfig(configtest.EmptyConfig()) - require.NoError(t, err) - require.Equal(t, false, tc.Enabled) - require.Equal(t, tracing.Exporter(""), tc.Exporter) - require.Equal(t, "", tc.Endpoint) - require.Equal(t, "frostfs-node", tc.Service) - require.Equal(t, "", tc.InstanceID) - require.Nil(t, tc.ServerCaCertPool) - require.Empty(t, tc.Attributes) - }) - - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - tc, err := ToTracingConfig(c) - require.NoError(t, err) - require.Equal(t, true, tc.Enabled) - require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter) - require.Equal(t, "localhost", tc.Endpoint) - require.Equal(t, "frostfs-node", tc.Service) - require.Nil(t, tc.ServerCaCertPool) - require.EqualValues(t, map[string]string{ - "key0": "value", - "key1": "value", - }, tc.Attributes) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go deleted file mode 100644 index a3c3d691b..000000000 --- a/cmd/frostfs-node/config/tree/config.go +++ /dev/null @@ -1,113 +0,0 @@ -package treeconfig - -import ( 
- "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -const ( - subsection = "tree" - - SyncBatchSizeDefault = 1000 -) - -// TreeConfig is a wrapper over "tree" config section -// which provides access to the configuration of the tree service. -type TreeConfig struct { - cfg *config.Config -} - -// Tree returns structure that provides access to a "tree" -// configuration subsection. -func Tree(c *config.Config) TreeConfig { - return TreeConfig{ - c.Sub(subsection), - } -} - -// Enabled returns the value of "enabled" config parameter -// from the "tree" section. -// -// Returns `false` if config value is not specified. -func (c TreeConfig) Enabled() bool { - return config.BoolSafe(c.cfg, "enabled") -} - -// CacheSize returns the value of "cache_size" config parameter -// from the "tree" section. -// -// Returns `0` if config value is not specified. -func (c TreeConfig) CacheSize() int { - return int(config.IntSafe(c.cfg, "cache_size")) -} - -// ReplicationTimeout returns the value of "replication_timeout" -// config parameter from the "tree" section. -// -// Returns `0` if config value is not specified. -func (c TreeConfig) ReplicationTimeout() time.Duration { - return config.DurationSafe(c.cfg, "replication_timeout") -} - -// ReplicationChannelCapacity returns the value of "replication_channel_capacity" -// config parameter from the "tree" section. -// -// Returns `0` if config value is not specified. -func (c TreeConfig) ReplicationChannelCapacity() int { - return int(config.IntSafe(c.cfg, "replication_channel_capacity")) -} - -// ReplicationWorkerCount returns the value of "replication_worker_count" -// config parameter from the "tree" section. -// -// Returns `0` if config value is not specified. -func (c TreeConfig) ReplicationWorkerCount() int { - return int(config.IntSafe(c.cfg, "replication_worker_count")) -} - -// SyncInterval returns the value of "sync_interval" -// config parameter from the "tree" section. -// -// Returns 0 if config value is not specified. -func (c TreeConfig) SyncInterval() time.Duration { - return config.DurationSafe(c.cfg, "sync_interval") -} - -// SyncBatchSize returns the value of "sync_batch_size" -// config parameter from the "tree" section. -// -// Returns `SyncBatchSizeDefault` if config value is not specified. -func (c TreeConfig) SyncBatchSize() int { - if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 { - return int(v) - } - return SyncBatchSizeDefault -} - -// UnsafeSyncDisabled returns the value of "unsafe_sync_disabled" -// config parameter from the "tree" section. -func (c TreeConfig) UnsafeSyncDisabled() bool { - return config.BoolSafe(c.cfg, "unsafe_sync_disabled") -} - -// AuthorizedKeys parses and returns an array of "authorized_keys" config -// parameter from "tree" section. -// -// Returns an empty list if not set. 
-func (c TreeConfig) AuthorizedKeys() keys.PublicKeys { - authorizedKeysStr := config.StringSliceSafe(c.cfg, "authorized_keys") - authorizedKeys := make(keys.PublicKeys, 0, len(authorizedKeysStr)) - - for i := range authorizedKeysStr { - pub, err := keys.NewPublicKeyFromString(authorizedKeysStr[i]) - if err != nil { - panic(fmt.Errorf("could not parse Tree authorized key %s: %w", authorizedKeysStr[i], err)) - } - - authorizedKeys = append(authorizedKeys, pub) - } - return authorizedKeys -} diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go deleted file mode 100644 index 6628b8878..000000000 --- a/cmd/frostfs-node/config/tree/config_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package treeconfig_test - -import ( - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestTreeSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - treeSec := treeconfig.Tree(empty) - - require.False(t, treeSec.Enabled()) - require.Equal(t, 0, treeSec.CacheSize()) - require.Equal(t, 0, treeSec.ReplicationChannelCapacity()) - require.Equal(t, 0, treeSec.ReplicationWorkerCount()) - require.Equal(t, time.Duration(0), treeSec.ReplicationTimeout()) - require.Equal(t, 0, len(treeSec.AuthorizedKeys())) - }) - - const path = "../../../../config/example/node" - - var expectedKeys keys.PublicKeys - key, err := keys.NewPublicKeyFromString("0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0") - require.NoError(t, err) - expectedKeys = append(expectedKeys, key) - key, err = keys.NewPublicKeyFromString("02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56") - require.NoError(t, err) - expectedKeys = append(expectedKeys, key) - - fileConfigTest := func(c *config.Config) { - treeSec := treeconfig.Tree(c) - - require.True(t, treeSec.Enabled()) - require.Equal(t, 15, treeSec.CacheSize()) - require.Equal(t, 32, treeSec.ReplicationChannelCapacity()) - require.Equal(t, 32, treeSec.ReplicationWorkerCount()) - require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout()) - require.Equal(t, time.Hour, treeSec.SyncInterval()) - require.Equal(t, 2000, treeSec.SyncBatchSize()) - require.Equal(t, expectedKeys, treeSec.AuthorizedKeys()) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go deleted file mode 100644 index bdb280d87..000000000 --- a/cmd/frostfs-node/container.go +++ /dev/null @@ -1,255 +0,0 @@ -package main - -import ( - "bytes" - "context" - "net" - - containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" - containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc" - containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" - containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -func initContainerService(_ context.Context, c *cfg) { - // container wrapper that tries to invoke notary - // requests if chain is configured so - wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) - fatalOnErr(err) - - c.cnrClient = wrap - - cnrSrc := cntClient.AsContainerSource(wrap) - - cnrRdr, cnrWrt := configureEACLAndContainerSources(c, wrap, cnrSrc) - - var frostfsIDSubjectProvider frostfsidcore.SubjectProvider - frostfsIDSubjectProvider, err = frostfsid.NewFromMorph(c.cfgMorph.client, c.cfgFrostfsID.scriptHash, 0) - fatalOnErr(err) - - cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) - if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { - frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) - } - - c.frostfsidClient = frostfsIDSubjectProvider - c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) - - defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( - c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), - c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), - ) - service := containerService.NewSignService( - &c.key.PrivateKey, - containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, - containerService.NewSplitterService( - c.cfgContainer.containerBatchSize, c.respSvc, - containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), - ), - ) - service = containerService.NewAuditService(service, c.log, c.audit) - server := containerTransportGRPC.New(service) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - containerGRPC.RegisterContainerServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. 
- s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server) - }) - - c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr) -} - -func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) { - cnrRdr := new(morphContainerReader) - - cnrWrt := &morphContainerWriter{ - neoClient: client, - } - - if c.cfgMorph.cacheTTL <= 0 { - c.cfgObject.cnrSource = cnrSrc - cnrRdr.src = cnrSrc - cnrRdr.lister = client - } else { - // use RPC node as source of Container contract items (with caching) - c.cfgObject.cnrSource = cnrSrc - if c.cfgMorph.containerCacheSize > 0 { - containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize) - - subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) { - ev := e.(containerEvent.PutSuccess) - - // read owner of the created container in order to update the reading cache. - // TODO: use owner directly from the event after neofs-contract#256 will become resolved - // but don't forget about the profit of reading the new container and caching it: - // creation success are most commonly tracked by polling GET op. - cnr, err := cnrSrc.Get(ctx, ev.ID) - if err == nil { - containerCache.containerCache.set(ev.ID, cnr, nil) - } else { - // unlike removal, we expect successful receive of the container - // after successful creation, so logging can be useful - c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, - zap.Stringer("id", ev.ID), - zap.Error(err), - ) - } - - c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) - - subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { - ev := e.(containerEvent.DeleteSuccess) - containerCache.handleRemoval(ev.ID) - c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) - c.cfgObject.cnrSource = containerCache - } - - cnrRdr.lister = client - cnrRdr.src = c.cfgObject.cnrSource - } - - return cnrRdr, cnrWrt -} - -// addContainerNotificationHandler adds handler that will be executed synchronously. -func addContainerNotificationHandler(c *cfg, sTyp string, h event.Handler) { - typ := event.TypeFromString(sTyp) - - if c.cfgContainer.subscribers == nil { - c.cfgContainer.subscribers = make(map[event.Type][]event.Handler, 1) - } - - c.cfgContainer.subscribers[typ] = append(c.cfgContainer.subscribers[typ], h) -} - -// addContainerAsyncNotificationHandler adds handler that will be executed asynchronously via container workerPool. -func addContainerAsyncNotificationHandler(c *cfg, sTyp string, h event.Handler) { - addContainerNotificationHandler( - c, - sTyp, - event.WorkerPoolHandler( - c.cfgContainer.workerPool, - h, - c.log, - ), - ) -} - -// stores already registered parsers of the notification events thrown by Container contract. -// MUST NOT be used concurrently. -var mRegisteredParsersContainer = make(map[string]struct{}) - -// registers event parser by name once. MUST NOT be called concurrently. -func registerEventParserOnceContainer(c *cfg, name string, p event.NotificationParser) { - if _, ok := mRegisteredParsersContainer[name]; !ok { - setContainerNotificationParser(c, name, p) - mRegisteredParsersContainer[name] = struct{}{} - } -} - -// subscribes to successful container creation. Provided handler is called asynchronously -// on corresponding routine pool. 
MUST NOT be called concurrently with itself and other -// similar functions. -func subscribeToContainerCreation(c *cfg, h event.Handler) { - const eventNameContainerCreated = "PutSuccess" - registerEventParserOnceContainer(c, eventNameContainerCreated, containerEvent.ParsePutSuccess) - addContainerAsyncNotificationHandler(c, eventNameContainerCreated, h) -} - -// like subscribeToContainerCreation but for removal. -func subscribeToContainerRemoval(c *cfg, h event.Handler) { - const eventNameContainerRemoved = "DeleteSuccess" - registerEventParserOnceContainer(c, eventNameContainerRemoved, containerEvent.ParseDeleteSuccess) - addContainerAsyncNotificationHandler(c, eventNameContainerRemoved, h) -} - -func setContainerNotificationParser(c *cfg, sTyp string, p event.NotificationParser) { - typ := event.TypeFromString(sTyp) - - if c.cfgContainer.parsers == nil { - c.cfgContainer.parsers = make(map[event.Type]event.NotificationParser, 1) - } - - c.cfgContainer.parsers[typ] = p -} - -func (c *cfg) PublicKey() []byte { - return nodeKeyFromNetmap(c) -} - -func (c *cfg) IsLocalKey(key []byte) bool { - return bytes.Equal(key, c.PublicKey()) -} - -func (c *cfg) IterateAddresses(f func(string) bool) { - c.iterateNetworkAddresses(f) -} - -func (c *cfg) NumberOfAddresses() int { - return c.addressNum() -} - -func (c *cfg) ExternalAddresses() []string { - return c.cfgNodeInfo.localInfo.ExternalAddresses() -} - -// implements interface required by container service provided by morph executor. -type morphContainerReader struct { - src containerCore.Source - - lister interface { - ContainersOf(context.Context, *user.ID) ([]cid.ID, error) - IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error - } -} - -func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { - return x.src.Get(ctx, id) -} - -func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { - return x.src.DeletionInfo(ctx, id) -} - -func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { - return x.lister.ContainersOf(ctx, id) -} - -func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { - return x.lister.IterateContainersOf(ctx, id, processCID) -} - -type morphContainerWriter struct { - neoClient *cntClient.Client -} - -func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) { - return cntClient.Put(ctx, m.neoClient, cnr) -} - -func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error { - return cntClient.Delete(ctx, m.neoClient, witness) -} diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go deleted file mode 100644 index 1825013c7..000000000 --- a/cmd/frostfs-node/control.go +++ /dev/null @@ -1,128 +0,0 @@ -package main - -import ( - "context" - "fmt" - "net" - - controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -const serviceNameControl = "control" - -func initControlService(ctx context.Context, c *cfg) { - endpoint := controlconfig.GRPC(c.appCfg).Endpoint() - if endpoint == controlconfig.GRPCEndpointDefault { - return - } - - pubs := controlconfig.AuthorizedKeys(c.appCfg) - rawPubs := make([][]byte, 0, len(pubs)+1) // +1 for node key - - rawPubs = append(rawPubs, c.key.PublicKey().Bytes()) - - for i := range pubs { - rawPubs = append(rawPubs, pubs[i].Bytes()) - } - - ctlSvc := controlSvc.New( - controlSvc.WithKey(&c.key.PrivateKey), - controlSvc.WithAuthorizedKeys(rawPubs), - controlSvc.WithHealthChecker(c), - controlSvc.WithNetMapSource(c.netMapSource), - controlSvc.WithContainerSource(c.cfgObject.cnrSource), - controlSvc.WithReplicator(c.replicator), - controlSvc.WithNodeState(c), - controlSvc.WithLocalStorage(c.cfgObject.cfgLocalStorage.localStorage), - controlSvc.WithTreeService(c.treeService), - controlSvc.WithLocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine), - ) - - lis, err := net.Listen("tcp", endpoint) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) - return - } - - c.cfgControlService.server = grpc.NewServer( - grpc.ChainUnaryInterceptor( - qos.NewSetCriticalIOTagUnaryServerInterceptor(), - metrics.NewUnaryServerInterceptor(), - tracing.NewUnaryServerInterceptor(), - ), - // control service has no stream methods, so no stream interceptors added - ) - - c.onShutdown(func() { - stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) - }) - - control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc) - - c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { - runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) { - c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, - zap.String("service", serviceNameControl), - zap.String("endpoint", endpoint)) - fatalOnErr(c.cfgControlService.server.Serve(lis)) - }) - })) -} - -func (c *cfg) NetmapStatus() control.NetmapStatus { - return c.cfgNetmap.state.controlNetmapStatus() -} - -func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) { - c.notifySystemd(ctx, st) - c.healthStatus.Store(int32(st)) - c.metricsCollector.State().SetHealth(int32(st)) -} - -func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { - if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { - c.notifySystemd(ctx, newSt) - c.metricsCollector.State().SetHealth(int32(newSt)) - } - return -} - -func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) { - old = control.HealthStatus(c.healthStatus.Swap(int32(st))) - c.notifySystemd(ctx, st) - c.metricsCollector.State().SetHealth(int32(st)) - return -} - -func (c *cfg) HealthStatus() control.HealthStatus { - return control.HealthStatus(c.healthStatus.Load()) -} - -func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) { - if !c.sdNotify { - return - } - var err error - switch st { - case control.HealthStatus_READY: - err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled) - case control.HealthStatus_SHUTTING_DOWN: - err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled) - case control.HealthStatus_RECONFIGURING: - err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled) - default: - err = 
sdnotify.Status(fmt.Sprintf("%v", st)) - } - if err != nil { - c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) - } -} diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go deleted file mode 100644 index d2d4e9785..000000000 --- a/cmd/frostfs-node/frostfsid.go +++ /dev/null @@ -1,117 +0,0 @@ -package main - -import ( - "context" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type subjectWithError struct { - subject *client.Subject - err error -} - -type subjectExtWithError struct { - subject *client.SubjectExtended - err error -} - -type morphFrostfsIDCache struct { - subjProvider frostfsidcore.SubjectProvider - - subjCache *expirable.LRU[util.Uint160, subjectWithError] - - subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError] - - metrics cacheMetrics -} - -func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider { - return &morphFrostfsIDCache{ - subjProvider: subjProvider, - - subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl), - - subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl), - - metrics: metrics, - } -} - -func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { - hit := false - startedAt := time.Now() - defer func() { - m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit) - }() - - result, found := m.subjCache.Get(addr) - if found { - hit = true - return result.subject, result.err - } - - subj, err := m.subjProvider.GetSubject(ctx, addr) - if err != nil { - if m.isCacheableError(err) { - m.subjCache.Add(addr, subjectWithError{ - err: err, - }) - } - return nil, err - } - - m.subjCache.Add(addr, subjectWithError{subject: subj}) - return subj, nil -} - -func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { - hit := false - startedAt := time.Now() - defer func() { - m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit) - }() - - result, found := m.subjExtCache.Get(addr) - if found { - hit = true - return result.subject, result.err - } - - subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) - if err != nil { - if m.isCacheableError(err) { - m.subjExtCache.Add(addr, subjectExtWithError{ - err: err, - }) - m.subjCache.Add(addr, subjectWithError{ - err: err, - }) - } - return nil, err - } - - m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt}) - m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)}) - - return subjExt, nil -} - -func (m *morphFrostfsIDCache) isCacheableError(err error) bool { - return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) -} - -func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject { - return &client.Subject{ - PrimaryKey: subjExt.PrimaryKey, - AdditionalKeys: subjExt.AdditionalKeys, - Namespace: subjExt.Namespace, - Name: subjExt.Name, - KV: subjExt.KV, - } -} diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go deleted file mode 100644 index 6b6d44750..000000000 --- a/cmd/frostfs-node/grpc.go +++ /dev/null @@ -1,286 +0,0 @@ -package main - -import ( - "context" - 
"crypto/tls" - "errors" - "fmt" - "net" - "time" - - grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" - rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -const maxRecvMsgSize = 256 << 20 - -func initGRPC(ctx context.Context, c *cfg) { - var endpointsToReconnect []string - var successCount int - grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { - serverOpts, ok := getGrpcServerOpts(ctx, c, sc) - if !ok { - return - } - - lis, err := net.Listen("tcp", sc.Endpoint()) - if err != nil { - c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint()) - c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint()) - return - } - c.metricsCollector.GrpcServerMetrics().MarkHealthy(sc.Endpoint()) - - srv := grpc.NewServer(serverOpts...) - - c.onShutdown(func() { - stopGRPC(ctx, "FrostFS Public API", srv, c.log) - }) - - c.cfgGRPC.append(sc.Endpoint(), lis, srv) - successCount++ - }) - - if successCount == 0 { - fatalOnErr(errors.New("could not listen to any gRPC endpoints")) - } - c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg) - - for _, endpoint := range endpointsToReconnect { - scheduleReconnect(ctx, endpoint, c) - } -} - -func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) { - c.wg.Add(1) - go func() { - defer c.wg.Done() - - t := time.NewTicker(c.cfgGRPC.reconnectTimeout) - for { - select { - case <-t.C: - if tryReconnect(ctx, endpoint, c) { - return - } - case <-c.done: - return - } - } - }() -} - -func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool { - c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) - - serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c) - if !found { - c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) - return true - } - - lis, err := net.Listen("tcp", endpoint) - if err != nil { - c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint) - c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) - return false - } - c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint) - - srv := grpc.NewServer(serverOpts...) 
- - c.onShutdown(func() { - stopGRPC(ctx, "FrostFS Public API", srv, c.log) - }) - - c.cfgGRPC.appendAndHandle(endpoint, lis, srv) - - c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) - return true -} - -func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { - unlock := c.LockAppConfigShared() - defer unlock() - grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { - if found { - return - } - if sc.Endpoint() != endpoint { - return - } - var ok bool - result, ok = getGrpcServerOpts(ctx, c, sc) - if !ok { - return - } - found = true - }) - return -} - -func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { - serverOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(maxRecvMsgSize), - grpc.ChainUnaryInterceptor( - qos.NewUnaryServerInterceptor(), - metrics.NewUnaryServerInterceptor(), - tracing.NewUnaryServerInterceptor(), - qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), - ), - grpc.ChainStreamInterceptor( - qos.NewStreamServerInterceptor(), - metrics.NewStreamServerInterceptor(), - tracing.NewStreamServerInterceptor(), - qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), - ), - } - - tlsCfg := sc.TLS() - - if tlsCfg != nil { - cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) - return nil, false - } - - var cipherSuites []uint16 - if !tlsCfg.UseInsecureCrypto() { - // This more or less follows the list in https://wiki.mozilla.org/Security/Server_Side_TLS - // excluding: - // 1. TLS 1.3 suites need not be specified here. - // 2. Suites that use DH key exchange are not implemented by stdlib. 
- cipherSuites = []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - } - } - creds := credentials.NewTLS(&tls.Config{ - MinVersion: tls.VersionTLS12, - CipherSuites: cipherSuites, - Certificates: []tls.Certificate{cert}, - }) - - serverOpts = append(serverOpts, grpc.Creds(creds)) - } - - return serverOpts, true -} - -func serveGRPC(ctx context.Context, c *cfg) { - c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) { - c.wg.Add(1) - - go func() { - defer func() { - c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint, - zap.Stringer("endpoint", l.Addr()), - ) - - c.wg.Done() - }() - - c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, - zap.String("service", "gRPC"), - zap.Stringer("endpoint", l.Addr()), - ) - - if err := s.Serve(l); err != nil { - c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e) - c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err)) - c.cfgGRPC.dropConnection(e) - scheduleReconnect(ctx, e, c) - } - }() - }) -} - -func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) { - l = l.With(zap.String("name", name)) - - l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer) - - // GracefulStop() may freeze forever, see #1270 - done := make(chan struct{}) - go func() { - s.GracefulStop() - close(done) - }() - - select { - case <-done: - case <-time.After(1 * time.Minute): - l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) - s.Stop() - } - - l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) -} - -func initRPCLimiter(c *cfg) error { - var limits []limiting.KeyLimit - for _, l := range rpcconfig.Limits(c.appCfg) { - limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) - } - - if err := validateRPCLimits(c, limits); err != nil { - return fmt.Errorf("validate RPC limits: %w", err) - } - - limiter, err := limiting.NewSemaphoreLimiter(limits) - if err != nil { - return fmt.Errorf("create RPC limiter: %w", err) - } - - c.cfgGRPC.limiter.Store(limiter) - return nil -} - -func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { - availableMethods := getAvailableMethods(c.cfgGRPC.servers) - for _, limit := range limits { - for _, method := range limit.Keys { - if _, ok := availableMethods[method]; !ok { - return fmt.Errorf("set limit on an unknown method %q", method) - } - } - } - return nil -} - -func getAvailableMethods(servers []grpcServer) map[string]struct{} { - res := make(map[string]struct{}) - for _, server := range servers { - for _, method := range getMethodsForServer(server.Server) { - res[method] = struct{}{} - } - } - return res -} - -func getMethodsForServer(server *grpc.Server) []string { - var res []string - for service, info := range server.GetServiceInfo() { - for _, method := range info.Methods { - res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) - } - } - return res -} diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go deleted file mode 100644 index 7346206ef..000000000 --- a/cmd/frostfs-node/httpcomponent.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "context" - "net/http" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" - "go.uber.org/zap" -) - 
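// httpComponent below is the shared wrapper for auxiliary HTTP servers, used
// e.g. by metricsComponent further down; a hedged construction sketch (field
// values are illustrative only):
//
//	cmp := &httpComponent{name: "metrics", address: "localhost:9090", handler: h, cfg: c}
//	cmp.init(ctx, c)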
-type httpComponent struct { - address string - name string - handler http.Handler - shutdownDur time.Duration - enabled bool - cfg *cfg - preReload func(c *cfg) -} - -func (cmp *httpComponent) init(ctx context.Context, c *cfg) { - if !cmp.enabled { - c.log.Info(ctx, cmp.name+" is disabled") - return - } - // Init server with parameters - srv := httputil.New( - *httputil.NewHTTPSrvPrm( - cmp.address, - cmp.handler, - ), - httputil.WithShutdownTimeout( - cmp.shutdownDur, - ), - ) - c.wg.Add(1) - go func() { - defer c.wg.Done() - - c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, - zap.String("service", cmp.name), - zap.String("endpoint", cmp.address)) - fatalOnErr(srv.Serve()) - }() - c.closers = append(c.closers, closer{ - cmp.name, - func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) }, - }) -} - -func (cmp *httpComponent) reload(ctx context.Context) error { - if cmp.preReload != nil { - cmp.preReload(cmp.cfg) - } - // Shutdown server - closer := getCloser(cmp.cfg, cmp.name) - if closer != nil { - closer.fn() - } - // Cleanup - delCloser(cmp.cfg, cmp.name) - // Init server with new parameters - cmp.init(ctx, cmp.cfg) - // Start worker - if cmp.enabled { - startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name)) - } - return nil -} diff --git a/cmd/frostfs-node/keyspaceiterator.go b/cmd/frostfs-node/keyspaceiterator.go deleted file mode 100644 index 09a8f7f73..000000000 --- a/cmd/frostfs-node/keyspaceiterator.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "context" - "fmt" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" -) - -type keySpaceIterator struct { - ng *engine.StorageEngine - cur *engine.Cursor -} - -func (it *keySpaceIterator) Next(ctx context.Context, batchSize uint32) ([]objectcore.Info, error) { - var prm engine.ListWithCursorPrm - prm.WithCursor(it.cur) - prm.WithCount(batchSize) - - res, err := it.ng.ListWithCursor(ctx, prm) - if err != nil { - return nil, fmt.Errorf("cannot list objects in engine: %w", err) - } - - it.cur = res.Cursor() - return res.AddressList(), nil -} - -func (it *keySpaceIterator) Rewind() { - it.cur = nil -} diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go deleted file mode 100644 index 0228d2a10..000000000 --- a/cmd/frostfs-node/main.go +++ /dev/null @@ -1,181 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "go.uber.org/zap" -) - -const ( - // SuccessReturnCode returns when application closed without panic. - SuccessReturnCode = 0 -) - -// prints err to standard logger and calls os.Exit(1). -func fatalOnErr(err error) { - if err != nil { - log.Fatal(err) - } -} - -// prints err with details to standard logger and calls os.Exit(1). 
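// Sketch ("open storage" is an illustrative label):
//
//	fatalOnErrDetails("open storage", err) // logs "open storage: <err>", then exits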
-func fatalOnErrDetails(details string, err error) { - if err != nil { - log.Fatal(fmt.Errorf("%s: %w", details, err)) - } -} - -func main() { - configFile := flag.String("config", "", "path to config") - configDir := flag.String("config-dir", "", "path to config directory") - versionFlag := flag.Bool("version", false, "frostfs node version") - dryRunFlag := flag.Bool("check", false, "validate configuration and exit") - flag.Parse() - - if *versionFlag { - fmt.Print(misc.BuildInfo("FrostFS Storage node")) - - os.Exit(SuccessReturnCode) - } - - appCfg := config.New(*configFile, *configDir, config.EnvPrefix) - - err := validateConfig(appCfg) - fatalOnErr(err) - - if *dryRunFlag { - return - } - - c := initCfg(appCfg) - - var ctx context.Context - ctx, c.ctxCancel = context.WithCancel(context.Background()) - - c.setHealthStatus(ctx, control.HealthStatus_STARTING) - - initApp(ctx, c) - - bootUp(ctx, c) - - c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY) - - wait(c) -} - -func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) { - c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name)) - initializer(c) - c.log.Info(ctx, name+" service has been successfully initialized") -} - -func initApp(ctx context.Context, c *cfg) { - c.wg.Add(1) - go func() { - c.signalWatcher(ctx) - c.wg.Done() - }() - - setRuntimeParameters(ctx, c) - metrics, _ := metricsComponent(c) - initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) }) - initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) }) - - initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) }) - - initLocalStorage(ctx, c) - - initAndLog(ctx, c, "storage engine", func(c *cfg) { - fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx)) - fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx)) - }) - - initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) - initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) - initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) - - initAccessPolicyEngine(ctx, c) - initAndLog(ctx, c, "access policy engine", func(c *cfg) { - fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx)) - fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init()) - }) - - initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) - initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) }) - initAndLog(ctx, c, "session", initSessionService) - initAndLog(ctx, c, "object", initObjectService) - initAndLog(ctx, c, "tree", initTreeService) - initAndLog(ctx, c, "apemanager", initAPEManagerService) - initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) - - initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) - - initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) -} - -func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) { - c.log.Info(ctx, fmt.Sprintf("starting %s service...", name)) - starter(ctx, c) - - if logSuccess { - c.log.Info(ctx, name+" service started successfully") - } -} - -func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) { - c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name)) - - err := stopper(ctx) - if err != nil { - c.log.Debug(ctx, 
fmt.Sprintf("could not shutdown %s server", name), - zap.Error(err), - ) - } - - c.log.Debug(ctx, name+" service has been stopped") -} - -func bootUp(ctx context.Context, c *cfg) { - runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) }) - runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit) - - bootstrapNode(ctx, c) - startWorkers(ctx, c) -} - -func wait(c *cfg) { - c.log.Info(context.Background(), logs.CommonApplicationStarted, - zap.String("version", misc.Version)) - - <-c.done // graceful shutdown - - drain := &sync.WaitGroup{} - drain.Add(1) - go func() { - defer drain.Done() - for err := range c.internalErr { - c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError, - zap.String("message", err.Error())) - } - }() - - c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop) - - c.wg.Wait() - - close(c.internalErr) - drain.Wait() -} - -func (c *cfg) onShutdown(f func()) { - c.closers = append(c.closers, closer{"", f}) -} diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go deleted file mode 100644 index d9ca01e70..000000000 --- a/cmd/frostfs-node/metrics.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" -) - -func metricsComponent(c *cfg) (*httpComponent, bool) { - var updated bool - // check if it has been inited before - if c.metrics == nil { - c.metrics = new(httpComponent) - c.metrics.cfg = c - c.metrics.name = "metrics" - c.metrics.handler = metrics.Handler() - updated = true - } - - // (re)init read configuration - enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.metrics.enabled { - c.metrics.enabled = enabled - updated = true - } - address := metricsconfig.Address(c.appCfg) - if address != c.metrics.address { - c.metrics.address = address - updated = true - } - dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.metrics.shutdownDur { - c.metrics.shutdownDur = dur - updated = true - } - - return c.metrics, updated -} - -func enableMetricsSvc(c *cfg) { - c.metricsSvc.Enable() -} - -func disableMetricsSvc(c *cfg) { - c.metricsSvc.Disable() -} diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go deleted file mode 100644 index 917cf6fc0..000000000 --- a/cmd/frostfs-node/morph.go +++ /dev/null @@ -1,273 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "time" - - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -const ( - newEpochNotification = "NewEpoch" -) - -func (c *cfg) initMorphComponents(ctx context.Context) { - c.cfgMorph.guard.Lock() - defer c.cfgMorph.guard.Unlock() - if 
c.cfgMorph.initialized { - return - } - initMorphClient(ctx, c) - - lookupScriptHashesInNNS(c) // smart contract auto negotiation - - err := c.cfgMorph.client.EnableNotarySupport( - client.WithProxyContract( - c.cfgMorph.proxyScriptHash, - ), - ) - fatalOnErr(err) - - c.log.Info(ctx, logs.FrostFSNodeNotarySupport) - - wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0) - fatalOnErr(err) - - var netmapSource netmap.Source - - c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg) - c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) - - if c.cfgMorph.cacheTTL == 0 { - msPerBlock, err := c.cfgMorph.client.MsPerBlock() - fatalOnErr(err) - c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) - } - - if c.cfgMorph.cacheTTL < 0 { - netmapSource = newRawNetmapStorage(wrap) - } else { - // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, - morphconfig.NetmapCandidatesPollInterval(c.appCfg)) - } - - c.netMapSource = netmapSource - c.cfgNetmap.wrapper = wrap - c.cfgMorph.initialized = true -} - -func initMorphClient(ctx context.Context, c *cfg) { - addresses := morphconfig.RPCEndpoint(c.appCfg) - - // Morph client stable-sorts endpoints by priority. Shuffle here to randomize - // order of endpoints with the same priority. - rand.Shuffle(len(addresses), func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - - cli, err := client.New(ctx, - c.key, - client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log.WithTag(logger.TagMorph)), - client.WithMetrics(c.metricsCollector.MorphClientMetrics()), - client.WithEndpoints(addresses...), - client.WithConnLostCallback(func() { - c.internalErr <- errors.New("morph connection has been lost") - }), - client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)), - client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()), - client.WithDialerSource(c.dialerSource), - ) - if err != nil { - c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, - zap.Any("endpoints", addresses), - zap.Error(err), - ) - - fatalOnErr(err) - } - - c.onShutdown(func() { - c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents) - cli.Close() - }) - - if err := cli.SetGroupSignerScope(); err != nil { - c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) - } - - c.cfgMorph.client = cli -} - -func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { - tx, vub, err := makeNotaryDeposit(ctx, c) - fatalOnErr(err) - - if tx.Equals(util.Uint256{}) { - // non-error deposit with an empty TX hash means - // that the deposit has already been made; no - // need to wait it. 
- c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) - return - } - - err = waitNotaryDeposit(ctx, c, tx, vub) - fatalOnErr(err) -} - -func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) { - const ( - // gasMultiplier defines how many times more the notary - // balance must be compared to the GAS balance of the node: - // notaryBalance = GASBalance * gasMultiplier - gasMultiplier = 3 - - // gasDivisor defines what part of GAS balance (1/gasDivisor) - // should be transferred to the notary service - gasDivisor = 2 - ) - - depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor) - if err != nil { - return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err) - } - - return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount) -} - -func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { - return err - } - - c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) - return nil -} - -func listenMorphNotifications(ctx context.Context, c *cfg) { - var ( - err error - subs subscriber.Subscriber - ) - log := c.log.WithTag(logger.TagMorph) - - fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) - if err != nil { - fromSideChainBlock = 0 - c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) - } - - subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: log, - StartFromBlock: fromSideChainBlock, - Client: c.cfgMorph.client, - }) - fatalOnErr(err) - - lis, err := event.NewListener(event.ListenerParams{ - Logger: log, - Subscriber: subs, - }) - fatalOnErr(err) - - c.onShutdown(func() { - lis.Stop() - }) - - c.workers = append(c.workers, newWorkerFromFunc(func(wCtx context.Context) { - runAndLog(wCtx, c, "morph notification", false, func(lCtx context.Context, c *cfg) { - lis.ListenWithError(lCtx, c.internalErr) - }) - })) - - setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { - res, err := netmapEvent.ParseNewEpoch(src) - if err == nil { - log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, - zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), - ) - } - - return res, err - }) - registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers) - registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) - - registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) - - err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) - if err != nil { - log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, - zap.String("chain", "side"), - zap.Uint32("block_index", block.Index)) - } - }) -} - -func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parsers map[event.Type]event.NotificationParser, - subs map[event.Type][]event.Handler, -) { - for typ, handlers := range subs { - p, ok := parsers[typ] - if !ok { - panic(fmt.Sprintf("missing parser for event %s", typ)) - } - - lis.RegisterNotificationHandler(event.NotificationHandlerInfo{ - Contract: scHash, - Type: typ, - Parser: p, - Handlers: handlers, - }) - } -} - -func registerBlockHandler(lis event.Listener, handler event.BlockHandler) { - 
lis.RegisterBlockHandler(handler)
-}
-
-// lookupScriptHashesInNNS looks up contract script hashes in the sidechain's
-// NNS contract if they are not specified in the config file.
-func lookupScriptHashesInNNS(c *cfg) {
-	var (
-		err error
-
-		emptyHash = util.Uint160{}
-		targets   = [...]struct {
-			h       *util.Uint160
-			nnsName string
-		}{
-			{&c.cfgNetmap.scriptHash, client.NNSNetmapContractName},
-			{&c.cfgAccounting.scriptHash, client.NNSBalanceContractName},
-			{&c.cfgContainer.scriptHash, client.NNSContainerContractName},
-			{&c.cfgFrostfsID.scriptHash, client.NNSFrostFSIDContractName},
-			{&c.cfgMorph.proxyScriptHash, client.NNSProxyContractName},
-			{&c.cfgObject.cfgAccessPolicyEngine.policyContractHash, client.NNSPolicyContractName},
-		}
-	)
-
-	for _, t := range targets {
-		if emptyHash.Equals(*t.h) {
-			*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
-			fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
-		}
-	}
-}
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
deleted file mode 100644
index 7dfb4fe12..000000000
--- a/cmd/frostfs-node/netmap.go
+++ /dev/null
@@ -1,480 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"net"
-	"sync/atomic"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
-	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
-	netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
-	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-	"go.uber.org/zap"
-	"google.golang.org/grpc"
-)
-
-// networkState is the primary holder of the local node's network map state.
-type networkState struct { - epoch *atomic.Uint64 - - controlNetStatus atomic.Int32 // control.NetmapStatus - - nodeInfo atomic.Value // netmapSDK.NodeInfo - - metrics *metrics.NodeMetrics -} - -func newNetworkState() *networkState { - ns := &networkState{ - epoch: new(atomic.Uint64), - } - ns.controlNetStatus.Store(int32(control.NetmapStatus_STATUS_UNDEFINED)) - return ns -} - -func (s *networkState) CurrentEpoch() uint64 { - return s.epoch.Load() -} - -func (s *networkState) setCurrentEpoch(v uint64) { - s.epoch.Store(v) - if s.metrics != nil { - s.metrics.SetEpoch(v) - } -} - -func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { - ctrlNetSt := control.NetmapStatus_STATUS_UNDEFINED - - if ni != nil { - s.nodeInfo.Store(*ni) - - switch ni.Status() { - case netmapSDK.Online: - ctrlNetSt = control.NetmapStatus_ONLINE - case netmapSDK.Offline: - ctrlNetSt = control.NetmapStatus_OFFLINE - case netmapSDK.Maintenance: - ctrlNetSt = control.NetmapStatus_MAINTENANCE - case netmapSDK.UnspecifiedState: - ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED - } - } else { - ctrlNetSt = control.NetmapStatus_OFFLINE - - niRaw := s.nodeInfo.Load() - if niRaw != nil { - niOld := niRaw.(netmapSDK.NodeInfo) - - // nil ni means that the node is not included - // in the netmap - niOld.SetStatus(netmapSDK.Offline) - - s.nodeInfo.Store(niOld) - } - } - - s.setControlNetmapStatus(ctrlNetSt) -} - -// sets the current node state to the given value. Subsequent cfg.bootstrap -// calls will process this value to decide what status node should set in the -// network. -func (s *networkState) setControlNetmapStatus(st control.NetmapStatus) { - s.controlNetStatus.Store(int32(st)) -} - -func (s *networkState) controlNetmapStatus() (res control.NetmapStatus) { - return control.NetmapStatus(s.controlNetStatus.Load()) -} - -func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { - v := s.nodeInfo.Load() - if v != nil { - res, ok = v.(netmapSDK.NodeInfo) - assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) - } - - return -} - -func nodeKeyFromNetmap(c *cfg) []byte { - ni, ok := c.cfgNetmap.state.getNodeInfo() - if ok { - return ni.PublicKey() - } - - return nil -} - -func (c *cfg) iterateNetworkAddresses(f func(string) bool) { - ni, ok := c.cfgNetmap.state.getNodeInfo() - if ok { - for s := range ni.NetworkEndpoints() { - if f(s) { - return - } - } - } -} - -func (c *cfg) addressNum() int { - ni, ok := c.cfgNetmap.state.getNodeInfo() - if ok { - return ni.NumberOfNetworkEndpoints() - } - - return 0 -} - -func initNetmapService(ctx context.Context, c *cfg) { - network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo) - c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes()) - parseAttributes(c) - c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline) - - c.initMorphComponents(ctx) - - initNetmapState(ctx, c) - - server := netmapTransportGRPC.New( - netmapService.NewSignService( - &c.key.PrivateKey, - netmapService.NewExecutionService( - c, - c.apiVersion, - &netInfo{ - netState: c.cfgNetmap.state, - magic: c.cfgMorph.client, - morphClientNetMap: c.cfgNetmap.wrapper, - msPerBlockRdr: c.cfgMorph.client.MsPerBlock, - }, - c.respSvc, - ), - ), - ) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - netmapGRPC.RegisterNetmapServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. 
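The RegisterService call below registers the same server a second time under a FrostFS-specific descriptor produced by frostFSServiceDesc; the object and session services later in this diff carry the identical TODO and dual registration, and all of them are slated for removal together under #1487.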
- s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server) - }) - - addNewEpochNotificationHandlers(c) -} - -func addNewEpochNotificationHandlers(c *cfg) { - addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { - c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber()) - }) - - addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { - e := ev.(netmapEvent.NewEpoch).EpochNumber() - - c.updateContractNodeInfo(ctx, e) - - if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 - return - } - - if err := c.bootstrap(ctx); err != nil { - c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) - } - }) - - addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { - _, _, err := makeNotaryDeposit(ctx, c) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, - zap.Error(err), - ) - } - }) -} - -// bootstrapNode adds current node to the Network map. -// Must be called after initNetmapService. -func bootstrapNode(ctx context.Context, c *cfg) { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return - } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) -} - -func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { - typ := event.TypeFromString(sTyp) - - if c.cfgNetmap.subscribers == nil { - c.cfgNetmap.subscribers = make(map[event.Type][]event.Handler, 1) - } - - c.cfgNetmap.subscribers[typ] = append(c.cfgNetmap.subscribers[typ], h) -} - -func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser) { - typ := event.TypeFromString(sTyp) - - if c.cfgNetmap.parsers == nil { - c.cfgNetmap.parsers = make(map[event.Type]event.NotificationParser, 1) - } - - c.cfgNetmap.parsers[typ] = p -} - -// initNetmapState inits current Network map state. -// Must be called after Morph components initialization. 
-func initNetmapState(ctx context.Context, c *cfg) { - epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) - fatalOnErrDetails("could not initialize current epoch number", err) - - var ni *netmapSDK.NodeInfo - ni, err = c.netmapInitLocalNodeState(ctx, epoch) - fatalOnErrDetails("could not init network state", err) - - stateWord := nodeState(ni) - - c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState, - zap.Uint64("epoch", epoch), - zap.String("state", stateWord), - ) - - if ni != nil && ni.Status().IsMaintenance() { - c.isMaintenance.Store(true) - } - - c.cfgNetmap.state.setCurrentEpoch(epoch) - c.setContractNodeInfo(ni) -} - -func nodeState(ni *netmapSDK.NodeInfo) string { - if ni != nil { - switch ni.Status() { - case netmapSDK.Online: - return "online" - case netmapSDK.Offline: - return "offline" - case netmapSDK.Maintenance: - return "maintenance" - case netmapSDK.UnspecifiedState: - return "undefined" - } - } - return "undefined" -} - -func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { - nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) - if err != nil { - return nil, err - } - - var candidate *netmapSDK.NodeInfo - for i := range nmNodes { - if bytes.Equal(nmNodes[i].PublicKey(), c.binPublicKey) { - candidate = &nmNodes[i] - break - } - } - - node, err := c.netmapLocalNodeState(ctx, epoch) - if err != nil { - return nil, err - } - - if candidate == nil { - return node, nil - } - - nmState := nodeState(node) - candidateState := nodeState(candidate) - if nmState != candidateState { - // This happens when the node was switched to maintenance without epoch tick. - // We expect it to continue staying in maintenance. - c.log.Info(ctx, logs.CandidateStatusPriority, - zap.String("netmap", nmState), - zap.String("candidate", candidateState)) - } - return candidate, nil -} - -func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { - // calculate current network state - nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return nil, err - } - - c.netMap.Store(*nm) - - nmNodes := nm.Nodes() - for i := range nmNodes { - if bytes.Equal(nmNodes[i].PublicKey(), c.binPublicKey) { - return &nmNodes[i], nil - } - } - - return nil, nil -} - -// addNewEpochNotificationHandler adds handler that will be executed synchronously. -func addNewEpochNotificationHandler(c *cfg, h event.Handler) { - addNetmapNotificationHandler(c, newEpochNotification, h) -} - -// addNewEpochAsyncNotificationHandler adds handler that will be executed asynchronously via netmap workerPool. 
-func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { - addNetmapNotificationHandler( - c, - newEpochNotification, - event.WorkerPoolHandler( - c.cfgNetmap.workerPool, - h, - c.log, - ), - ) -} - -func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { - switch st { - default: - return fmt.Errorf("unsupported status %v", st) - case control.NetmapStatus_MAINTENANCE: - return c.setMaintenanceStatus(ctx, false) - case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE: - } - - c.stopMaintenance(ctx) - - if st == control.NetmapStatus_ONLINE { - c.cfgNetmap.reBoostrapTurnedOff.Store(false) - return bootstrapOnline(ctx, c) - } - - c.cfgNetmap.reBoostrapTurnedOff.Store(true) - - return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) -} - -func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { - epoch, err := c.netMapSource.Epoch(ctx) - if err != nil { - return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) - } - st := c.NetmapStatus() - return st, epoch, nil -} - -func (c *cfg) ForceMaintenance(ctx context.Context) error { - return c.setMaintenanceStatus(ctx, true) -} - -func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { - netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) - if err != nil { - err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) - } else if !netSettings.MaintenanceModeAllowed { - err = errors.New("maintenance mode is not allowed by the network") - } - - if err == nil || force { - c.startMaintenance(ctx) - - if err == nil { - err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance) - } - - if err != nil { - return fmt.Errorf("local maintenance is started, but state is not updated in the network: %w", err) - } - } - - return err -} - -// calls UpdatePeerState operation of Netmap contract's client for the local node. -// State setter is used to specify node state to switch to. 
-func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error { - var prm nmClient.UpdatePeerPrm - prm.SetKey(c.key.PublicKey().Bytes()) - stateSetter(&prm) - - res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) - if err != nil { - return err - } - return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) -} - -type netInfo struct { - netState netmap.State - - magic interface { - MagicNumber() (uint64, error) - } - - morphClientNetMap *nmClient.Client - - msPerBlockRdr func() (int64, error) -} - -func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { - magic, err := n.magic.MagicNumber() - if err != nil { - return nil, err - } - - var ni netmapSDK.NetworkInfo - ni.SetCurrentEpoch(n.netState.CurrentEpoch()) - ni.SetMagicNumber(magic) - - netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) - if err != nil { - return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) - } - - if mjr := ver.Major(); mjr > 2 || mjr == 2 && ver.Minor() > 9 { - msPerBlock, err := n.msPerBlockRdr() - if err != nil { - return nil, fmt.Errorf("ms per block: %w", err) - } - - ni.SetMsPerBlock(msPerBlock) - - ni.SetMaxObjectSize(netInfoMorph.MaxObjectSize) - ni.SetEpochDuration(netInfoMorph.EpochDuration) - ni.SetContainerFee(netInfoMorph.ContainerFee) - ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee) - ni.SetIRCandidateFee(netInfoMorph.IRCandidateFee) - ni.SetWithdrawalFee(netInfoMorph.WithdrawalFee) - - if netInfoMorph.HomomorphicHashingDisabled { - ni.DisableHomomorphicHashing() - } - - if netInfoMorph.MaintenanceModeAllowed { - ni.AllowMaintenanceMode() - } - - for i := range netInfoMorph.Raw { - ni.SetRawNetworkParameter(netInfoMorph.Raw[i].Name, netInfoMorph.Raw[i].Value) - } - } - - return &ni, nil -} diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go deleted file mode 100644 index e6be9cdf5..000000000 --- a/cmd/frostfs-node/netmap_source.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -type rawNetmapSource struct { - client *netmapClient.Client -} - -func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { - return &rawNetmapSource{ - client: client, - } -} - -func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMap(ctx, diff) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { - return s.client.Epoch(ctx) -} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go deleted 
file mode 100644 index c33c02b3f..000000000 --- a/cmd/frostfs-node/object.go +++ /dev/null @@ -1,474 +0,0 @@ -package main - -import ( - "context" - "fmt" - "net" - - metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" - policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" - replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" - objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" - objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" - deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - getsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get/v2" - patchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/patch" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - putsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put/v2" - searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" - searchsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/v2" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type objectSvc struct { - put *putsvcV2.Service - - search *searchsvcV2.Service - - get *getsvcV2.Service - - delete *deletesvcV2.Service - - patch *patchsvc.Service -} - -func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { - sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, - zap.Error(err), - ) - } - - return sz -} - -func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) { - return s.put.Put() -} - -func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) { - return s.patch.Patch() -} - -func (s *objectSvc) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { - return 
s.put.PutSingle(ctx, req) -} - -func (s *objectSvc) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { - return s.get.Head(ctx, req) -} - -func (s *objectSvc) Search(req *object.SearchRequest, stream objectService.SearchStream) error { - return s.search.Search(req, stream) -} - -func (s *objectSvc) Get(req *object.GetRequest, stream objectService.GetObjectStream) error { - return s.get.Get(req, stream) -} - -func (s *objectSvc) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { - return s.delete.Delete(ctx, req) -} - -func (s *objectSvc) GetRange(req *object.GetRangeRequest, stream objectService.GetObjectRangeStream) error { - return s.get.GetRange(req, stream) -} - -func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - return s.get.GetRangeHash(ctx, req) -} - -type delNetInfo struct { - netmap.State - - cfg *cfg -} - -func (i *delNetInfo) TombstoneLifetime() (uint64, error) { - return i.cfg.cfgObject.tombstoneLifetime.Load(), nil -} - -// LocalNodeID returns node owner ID calculated from configured private key. -// -// Implements method needed for Object.Delete service. -func (i *delNetInfo) LocalNodeID() user.ID { - return i.cfg.ownerIDFromKey -} - -type innerRingFetcherWithNotary struct { - sidechain *morphClient.Client -} - -func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { - keys, err := fn.sidechain.NeoFSAlphabetList(ctx) - if err != nil { - return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) - } - - result := make([][]byte, 0, len(keys)) - for i := range keys { - result = append(result, keys[i].Bytes()) - } - - return result, nil -} - -func initObjectService(c *cfg) { - keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) - - c.replicator = createReplicator(c, keyStorage, c.bgClientCache) - - addPolicer(c, keyStorage, c.bgClientCache) - - traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c) - irFetcher := newCachedIRFetcher(createInnerRingFetcher(c)) - - sPut := createPutSvc(c, keyStorage, &irFetcher) - - sPutV2 := createPutSvcV2(sPut, keyStorage) - - sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource) - - sSearchV2 := createSearchSvcV2(sSearch, keyStorage) - - sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource, - c.ObjectCfg.priorityMetrics) - - *c.cfgObject.getSvc = *sGet // need smth better - - sGetV2 := createGetServiceV2(c, sGet, keyStorage) - - sDelete := createDeleteService(c, keyStorage, sGet, sSearch, sPut) - - sDeleteV2 := createDeleteServiceV2(sDelete) - - sPatch := createPatchSvc(sGet, sPut) - - // build service pipeline - // grpc | audit | qos | | signature | response | acl | ape | split - - splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - - apeSvc := createAPEService(c, &irFetcher, splitSvc) - - var commonSvc objectService.Common - commonSvc.Init(&c.internals, apeSvc) - - respSvc := objectService.NewResponseService( - &commonSvc, - c.respSvc, - ) - - signSvc := objectService.NewSignService( - &c.key.PrivateKey, - respSvc, - ) - - c.metricsSvc = objectService.NewMetricCollector( - signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) - auditSvc := 
objectService.NewAuditService(qosService, c.log, c.audit) - server := objectTransportGRPC.New(auditSvc) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - objectGRPC.RegisterObjectServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. - s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server) - }) -} - -func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) { - if policerconfig.UnsafeDisable(c.appCfg) { - c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled) - return - } - - ls := c.cfgObject.cfgLocalStorage.localStorage - - buryFn := func(ctx context.Context, addr oid.Address) error { - var prm engine.InhumePrm - prm.MarkAsGarbage(addr) - prm.WithForceRemoval() - - return ls.Inhume(ctx, prm) - } - - remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) - pol := policer.New( - policer.WithLogger(c.log.WithTag(logger.TagPolicer)), - policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), - policer.WithBuryFunc(buryFn), - policer.WithContainerSource(c.cfgObject.cnrSource), - policer.WithPlacementBuilder( - placement.NewNetworkMapSourceBuilder(c.netMapSource), - ), - policer.WithRemoteObjectHeaderFunc( - func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - prm := new(objectService.RemoteRequestPrm).WithNodeInfo(ni).WithObjectAddress(a).WithRaw(raw) - return remoteReader.Head(ctx, prm) - }, - ), - policer.WithLocalObjectHeaderFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - var prm engine.HeadPrm - prm.WithAddress(a) - res, err := c.cfgObject.cfgLocalStorage.localStorage.Head(ctx, prm) - if err != nil { - return nil, err - } - return res.Header(), nil - }), - policer.WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) { - prm := new(objectService.RemoteRequestPrm).WithNodeInfo(ni).WithObjectAddress(a) - return remoteReader.Get(ctx, prm) - }), - policer.WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - var prm engine.GetPrm - prm.WithAddress(a) - res, err := c.cfgObject.cfgLocalStorage.localStorage.Get(ctx, prm) - if err != nil { - return nil, err - } - return res.Object(), nil - }), - policer.WithNetmapKeys(c), - policer.WithHeadTimeout( - policerconfig.HeadTimeout(c.appCfg), - ), - policer.WithReplicator(c.replicator), - policer.WithRedundantCopyCallback(func(ctx context.Context, addr oid.Address) { - var inhumePrm engine.InhumePrm - inhumePrm.MarkAsGarbage(addr) - - if err := ls.Inhume(ctx, inhumePrm); err != nil { - c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, - zap.Error(err), - ) - } - }), - policer.WithPool(c.cfgObject.pool.replication), - policer.WithMetrics(c.metricsCollector.PolicerMetrics()), - policer.WithKeyStorage(keyStorage), - ) - - c.workers = append(c.workers, worker{ - fn: func(ctx context.Context) { - pol.Run(ctx) - }, - }) -} - -func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { - return &innerRingFetcherWithNotary{ - sidechain: c.cfgMorph.client, - } -} - -func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator { - ls := c.cfgObject.cfgLocalStorage.localStorage - - return replicator.New( - replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), - replicator.WithPutTimeout( - replicatorconfig.PutTimeout(c.appCfg), - ), - 
replicator.WithLocalStorage(ls), - replicator.WithRemoteSender( - objectwriter.NewRemoteSender(keyStorage, cache), - ), - replicator.WithRemoteGetter( - getsvc.NewRemoteGetter(c.clientCache, c.netMapSource, keyStorage), - ), - replicator.WithMetrics(c.metricsCollector.Replicator()), - ) -} - -func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service { - ls := c.cfgObject.cfgLocalStorage.localStorage - - var os objectwriter.ObjectStorage = engineWithoutNotifications{ - engine: ls, - } - - return putsvc.NewService( - keyStorage, - c.putClientCache, - newCachedMaxObjectSizeSource(c), - os, - c.cfgObject.cnrSource, - c.netMapSource, - c, - c.cfgNetmap.state, - irFetcher, - objectwriter.WithLogger(c.log), - objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), - ) -} - -func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2.Service { - return putsvcV2.NewService(sPut, keyStorage) -} - -func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Service { - return patchsvc.NewService(sPut.Config, sGet) -} - -func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service { - ls := c.cfgObject.cfgLocalStorage.localStorage - - return searchsvc.New( - ls, - coreConstructor, - traverseGen.WithTraverseOptions( - placement.WithoutSuccessTracking(), - ), - c.netMapSource, - keyStorage, - containerSource, - searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), - ) -} - -func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage) *searchsvcV2.Service { - return searchsvcV2.NewService(sSearch, keyStorage) -} - -func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, - coreConstructor *cache.ClientCache, - containerSource containercore.Source, - priorityMetrics []placement.Metric, -) *getsvc.Service { - ls := c.cfgObject.cfgLocalStorage.localStorage - - return getsvc.New( - keyStorage, - c.netMapSource, - ls, - traverseGen.WithTraverseOptions( - placement.SuccessAfter(1), - placement.WithPriorityMetrics(priorityMetrics), - placement.WithNodeState(c), - ), - coreConstructor, - containerSource, - getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) -} - -func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { - return getsvcV2.NewService( - sGet, - keyStorage, - c.clientCache, - c.netMapSource, - c, - c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), - ) -} - -func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Service, - sSearch *searchsvc.Service, sPut *putsvc.Service, -) *deletesvc.Service { - return deletesvc.New( - sGet, - sSearch, - sPut, - &delNetInfo{ - State: c.cfgNetmap.state, - - cfg: c, - }, - keyStorage, - deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), - ) -} - -func createDeleteServiceV2(sDelete *deletesvc.Service) *deletesvcV2.Service { - return deletesvcV2.NewService(sDelete) -} - -func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Service, - sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service, sPatch *patchsvc.Service, -) *objectService.TransportSplitter { - return objectService.NewTransportSplitter( - c.cfgGRPC.maxChunkSize, - c.cfgGRPC.maxAddrAmount, - &objectSvc{ - put: sPutV2, - search: sSearchV2, - get: sGetV2, - delete: sDeleteV2, - patch: 
sPatch, - }, - ) -} - -func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { - return objectAPE.NewService( - objectAPE.NewChecker( - c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), - c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), - objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.frostfsidClient, - c.netMapSource, - c.cfgNetmap.state, - c.cfgObject.cnrSource, - c.binPublicKey, - ), - objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), - splitSvc, - ) -} - -type engineWithoutNotifications struct { - engine *engine.StorageEngine -} - -func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) { - return e.engine.IsLocked(ctx, address) -} - -func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error { - var prm engine.InhumePrm - - addrs := make([]oid.Address, len(toDelete)) - for i := range addrs { - addrs[i].SetContainer(tombstone.Container()) - addrs[i].SetObject(toDelete[i]) - } - - prm.WithTarget(tombstone, addrs...) - - return e.engine.Inhume(ctx, prm) -} - -func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { - return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock) -} - -func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error { - return engine.Put(ctx, e.engine, o, indexedContainer) -} diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go deleted file mode 100644 index 55f76cc76..000000000 --- a/cmd/frostfs-node/policy_engine.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "github.com/google/uuid" - "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type accessPolicyEngine struct { - localOverrideDatabase chainbase.LocalOverrideDatabase - - morphChainStorage engine.MorphRuleChainStorageReader -} - -var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil) - -type morphAPEChainCacheKey struct { - // nolint:unused - name chain.Name - // nolint:unused - target engine.Target -} - -type morphAPEChainCache struct { - source engine.MorphRuleChainStorageReader - cache *expirable.LRU[morphAPEChainCacheKey, []*chain.Chain] -} - -func newMorphCache(source engine.MorphRuleChainStorageReader, size int, ttl time.Duration) engine.MorphRuleChainStorageReader { - return &morphAPEChainCache{ - source: source, - cache: expirable.NewLRU(size, func(morphAPEChainCacheKey, []*chain.Chain) {}, ttl), - } -} - -func (m *morphAPEChainCache) GetAdmin() (util.Uint160, error) { - return m.source.GetAdmin() -} - -func (m *morphAPEChainCache) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - key := morphAPEChainCacheKey{name: name, target: target} - result, found := m.cache.Get(key) - if found { - return result, nil - } - - result, err := m.source.ListMorphRuleChains(name, target) - if err != nil { - return nil, err - } - - m.cache.Add(key, result) - return result, nil -} - -func (m *morphAPEChainCache) 
ListTargetsIterator(targetType engine.TargetType) (uuid.UUID, result.Iterator, error) { - return m.source.ListTargetsIterator(targetType) -} - -func newAccessPolicyEngine( - morphChainStorage engine.MorphRuleChainStorageReader, - localOverrideDatabase chainbase.LocalOverrideDatabase, -) *accessPolicyEngine { - return &accessPolicyEngine{ - morphChainStorage: morphChainStorage, - - localOverrideDatabase: localOverrideDatabase, - } -} - -func (a *accessPolicyEngine) LocalStorage() engine.LocalOverrideStorage { - return a.localOverrideDatabase -} - -func (a *accessPolicyEngine) MorphRuleChainStorage() engine.MorphRuleChainStorageReader { - return a.morphChainStorage -} - -func (a *accessPolicyEngine) LocalOverrideDatabaseCore() chainbase.DatabaseCore { - return a.localOverrideDatabase -} diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go deleted file mode 100644 index e4da8119f..000000000 --- a/cmd/frostfs-node/pprof.go +++ /dev/null @@ -1,63 +0,0 @@ -package main - -import ( - "context" - "runtime" - - profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" - httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" -) - -func initProfilerService(ctx context.Context, c *cfg) { - tuneProfilers(c) - - pprof, _ := pprofComponent(c) - pprof.init(ctx, c) -} - -func pprofComponent(c *cfg) (*httpComponent, bool) { - var updated bool - // check if it has been inited before - if c.pprof == nil { - c.pprof = new(httpComponent) - c.pprof.cfg = c - c.pprof.name = "pprof" - c.pprof.handler = httputil.Handler() - c.pprof.preReload = tuneProfilers - updated = true - } - - // (re)init read configuration - enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.pprof.enabled { - c.pprof.enabled = enabled - updated = true - } - address := profilerconfig.Address(c.appCfg) - if address != c.pprof.address { - c.pprof.address = address - updated = true - } - dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.pprof.shutdownDur { - c.pprof.shutdownDur = dur - updated = true - } - - return c.pprof, updated -} - -func tuneProfilers(c *cfg) { - // Disabled by default, see documentation for - // runtime.SetBlockProfileRate() and runtime.SetMutexProfileFraction(). 
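For context on the two lines below: runtime.SetBlockProfileRate(0) and runtime.SetMutexProfileFraction(0) switch those profilers off entirely, so tuneProfilers keeps both disabled unless the profiler endpoint is enabled in config. Since pprofComponent installs tuneProfilers as the component's preReload hook, the configured rates are also re-applied on every config reload.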
- blockRate := 0 - mutexRate := 0 - - if profilerconfig.Enabled(c.appCfg) { - blockRate = profilerconfig.BlockRate(c.appCfg) - mutexRate = profilerconfig.MutexRate(c.appCfg) - } - - runtime.SetBlockProfileRate(blockRate) - runtime.SetMutexProfileFraction(mutexRate) -} diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go deleted file mode 100644 index 6394b668b..000000000 --- a/cmd/frostfs-node/qos.go +++ /dev/null @@ -1,108 +0,0 @@ -package main - -import ( - "bytes" - "context" - - qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "go.uber.org/zap" -) - -type cfgQoSService struct { - netmapSource netmap.Source - logger *logger.Logger - allowedCriticalPubs [][]byte - allowedInternalPubs [][]byte -} - -func initQoSService(c *cfg) { - criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) - internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) - rawCriticalPubs := make([][]byte, 0, len(criticalPubs)) - rawInternalPubs := make([][]byte, 0, len(internalPubs)) - for i := range criticalPubs { - rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) - } - for i := range internalPubs { - rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) - } - - c.cfgQoSService = cfgQoSService{ - netmapSource: c.netMapSource, - logger: c.log, - allowedCriticalPubs: rawCriticalPubs, - allowedInternalPubs: rawInternalPubs, - } -} - -func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { - rawTag, defined := qosTagging.IOTagFromContext(ctx) - if !defined { - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) - } - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - ioTag, err := qos.FromRawString(rawTag) - if err != nil { - s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - - switch ioTag { - case qos.IOTagClient: - return ctx - case qos.IOTagCritical: - for _, pk := range s.allowedCriticalPubs { - if bytes.Equal(pk, requestSignPublicKey) { - return ctx - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), requestSignPublicKey) { - return ctx - } - } - s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - case qos.IOTagInternal: - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return ctx - } - s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - default: - s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } -} - -func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { - for _, pk := range s.allowedInternalPubs { - if bytes.Equal(pk, publicKey) { 
- return true - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return false - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), publicKey) { - return true - } - } - - return false -} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go deleted file mode 100644 index 971f9eebf..000000000 --- a/cmd/frostfs-node/qos_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package main - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestQoSService_Client(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag client defined", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, 
signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func TestQoSService_Internal(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) -} - -func TestQoSService_Critical(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) - t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) -} - -func TestQoSService_NetmapGetError(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - s.netmapSource = &utilTesting.TestNetmapSource{} - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) 
- t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { - nmSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - reqSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedCritSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedIntSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - var node netmap.NodeInfo - node.SetPublicKey(nmSigner.PublicKey().Bytes()) - nm := &netmap.NetMap{} - nm.SetEpoch(100) - nm.SetNodes([]netmap.NodeInfo{node}) - - return &cfgQoSService{ - logger: test.NewLogger(t), - netmapSource: &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - 100: nm, - }, - CurrentEpoch: 100, - }, - allowedCriticalPubs: [][]byte{ - allowedCritSigner.PublicKey().Bytes(), - }, - allowedInternalPubs: [][]byte{ - allowedIntSigner.PublicKey().Bytes(), - }, - }, - &testQoSServicePublicKeys{ - NetmapNode: nmSigner.PublicKey().Bytes(), - Request: reqSigner.PublicKey().Bytes(), - Internal: allowedIntSigner.PublicKey().Bytes(), - Critical: allowedCritSigner.PublicKey().Bytes(), - } -} - -type testQoSServicePublicKeys struct { - NetmapNode []byte - Request []byte - Internal []byte - Critical []byte -} diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go deleted file mode 100644 index f6d398574..000000000 --- a/cmd/frostfs-node/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "context" - "os" - "runtime/debug" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/runtime" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "go.uber.org/zap" -) - -func setRuntimeParameters(ctx context.Context, c *cfg) { - if len(os.Getenv("GOMEMLIMIT")) != 0 { - // default limit < yaml limit < app env limit < GOMEMLIMIT - c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) - return - } - - memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg) - previous := debug.SetMemoryLimit(memLimitBytes) - if memLimitBytes != previous { - c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated, - zap.Int64("new_value", memLimitBytes), - zap.Int64("old_value", previous)) - } -} diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go deleted file mode 100644 index fbfe3f5e6..000000000 --- a/cmd/frostfs-node/session.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "context" - "fmt" - "net" - "time" - - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - 
sessionTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/session/grpc" - sessionSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "google.golang.org/grpc" -) - -type sessionStorage interface { - Create(ctx context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) - Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken - RemoveOld(epoch uint64) - - Close() error -} - -func initSessionService(c *cfg) { - if persistentSessionPath := nodeconfig.PersistentSessions(c.appCfg).Path(); persistentSessionPath != "" { - persisessions, err := persistent.NewTokenStore(persistentSessionPath, - persistent.WithLogger(c.log), - persistent.WithTimeout(100*time.Millisecond), - persistent.WithEncryptionKey(&c.key.PrivateKey), - ) - if err != nil { - panic(fmt.Errorf("could not create persistent session token storage: %w", err)) - } - - c.privateTokenStore = persisessions - } else { - c.privateTokenStore = temporary.NewTokenStore() - } - - c.onShutdown(func() { - _ = c.privateTokenStore.Close() - }) - - addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { - c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber()) - }) - - server := sessionTransportGRPC.New( - sessionSvc.NewSignService( - &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), - ), - ) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - sessionGRPC.RegisterSessionServiceServer(s, server) - - // TODO(@aarifullin): #1487 remove the dual service support. 
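-		// Presumably frostFSServiceDesc re-registers the same implementation
-		// under the FrostFS-specific service name; this is the dual service
-		// support referenced in the TODO above.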
- s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server) - }) -} diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go deleted file mode 100644 index 65f5aec15..000000000 --- a/cmd/frostfs-node/tracing.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "context" - "time" - - tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.uber.org/zap" -) - -func initTracing(ctx context.Context, c *cfg) { - conf, err := tracingconfig.ToTracingConfig(c.appCfg) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) - return - } - _, err = tracing.Setup(ctx, *conf) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) - return - } - - c.closers = append(c.closers, closer{ - name: "tracing", - fn: func() { - ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*5) - defer cancel() - err := tracing.Shutdown(ctx) // cfg context cancels before close - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) - } - }, - }) -} diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go deleted file mode 100644 index 72cf57e9d..000000000 --- a/cmd/frostfs-node/tree.go +++ /dev/null @@ -1,123 +0,0 @@ -package main - -import ( - "context" - "errors" - "net" - "time" - - treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type cnrSource struct { - // cache of raw client. - src container.Source - // raw client; no need to cache request results - // since sync is performed once in epoch and is - // expected to receive different results every - // call. 
- cli *containerClient.Client -} - -func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { - return c.src.Get(ctx, id) -} - -func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { - return c.src.DeletionInfo(ctx, cid) -} - -func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { - return c.cli.ContainersOf(ctx, nil) -} - -func initTreeService(c *cfg) { - treeConfig := treeconfig.Tree(c.appCfg) - if !treeConfig.Enabled() { - c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) - return - } - - c.treeService = tree.New( - tree.WithContainerSource(cnrSource{ - src: c.cfgObject.cnrSource, - cli: c.cnrClient, - }), - tree.WithFrostfsidSubjectProvider(c.frostfsidClient), - tree.WithNetmapSource(c.netMapSource), - tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), - tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), - tree.WithContainerCacheSize(treeConfig.CacheSize()), - tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), - tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()), - tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()), - tree.WithSyncBatchSize(treeConfig.SyncBatchSize()), - tree.WithSyncDisabled(treeConfig.UnsafeSyncDisabled()), - tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()), - tree.WithMetrics(c.metricsCollector.TreeService()), - tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()), - tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()), - tree.WithNetmapState(c.cfgNetmap.state), - tree.WithDialerSource(c.dialerSource), - ) - - c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) - }) - - c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { - c.treeService.Start(ctx) - })) - - if d := treeConfig.SyncInterval(); d == 0 { - addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) { - err := c.treeService.SynchronizeAll() - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) - } - }) - } else { - go func() { - tick := time.NewTicker(d) - defer tick.Stop() - - for range tick.C { - err := c.treeService.SynchronizeAll() - if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) - if errors.Is(err, tree.ErrShuttingDown) { - return - } - } - } - }() - } - - subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { - ev := e.(containerEvent.DeleteSuccess) - - // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) - err := c.treeService.DropTree(ctx, ev.ID, "") - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. 
- c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, - zap.Stringer("cid", ev.ID), - zap.Error(err)) - } - }) - - c.onShutdown(c.treeService.Shutdown) -} diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go deleted file mode 100644 index 22d2e0aa9..000000000 --- a/cmd/frostfs-node/validate.go +++ /dev/null @@ -1,110 +0,0 @@ -package main - -import ( - "fmt" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" - shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" - loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" - treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// validateConfig validates storage node configuration. -func validateConfig(c *config.Config) error { - // logger configuration validation - - var loggerPrm logger.Prm - - err := loggerPrm.SetLevelString(loggerconfig.Level(c)) - if err != nil { - return fmt.Errorf("invalid logger level: %w", err) - } - - err = loggerPrm.SetDestination(loggerconfig.Destination(c)) - if err != nil { - return fmt.Errorf("invalid logger destination: %w", err) - } - - err = loggerPrm.SetTags(loggerconfig.Tags(c)) - if err != nil { - return fmt.Errorf("invalid list of allowed tags: %w", err) - } - - // shard configuration validation - - shardNum := 0 - paths := make(map[string]pathDescription) - return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { - if sc.WriteCache().Enabled() { - err := addPath(paths, "writecache", shardNum, sc.WriteCache().Path()) - if err != nil { - return err - } - } - - if err := addPath(paths, "metabase", shardNum, sc.Metabase().Path()); err != nil { - return err - } - - treeConfig := treeconfig.Tree(c) - if treeConfig.Enabled() { - err := addPath(paths, "pilorama", shardNum, sc.Pilorama().Path()) - if err != nil { - return err - } - } - - blobstor := sc.BlobStor().Storages() - if len(blobstor) != 2 { - return fmt.Errorf("blobstor section must have 2 components, got: %d", len(blobstor)) - } - for i := range blobstor { - switch blobstor[i].Type() { - case fstree.Type, blobovniczatree.Type: - default: - return fmt.Errorf("unexpected storage type: %s (shard %d)", blobstor[i].Type(), shardNum) - } - if blobstor[i].Perm()&0o600 != 0o600 { - return fmt.Errorf("invalid permissions for blobstor component: %s, "+ - "expected at least rw- for the owner (shard %d)", - blobstor[i].Perm(), shardNum) - } - if blobstor[i].Path() == "" { - return fmt.Errorf("blobstor component path is empty (shard %d)", shardNum) - } - err := addPath(paths, fmt.Sprintf("blobstor[%d]", i), shardNum, blobstor[i].Path()) - if err != nil { - return err - } - } - - shardNum++ - return nil - }) -} - -type pathDescription struct { - shard int - component string -} - -func addPath(paths map[string]pathDescription, component string, shard int, path string) error { - if path == "" { - return fmt.Errorf("%s at shard %d has empty path", component, shard) - } - - path = filepath.Clean(path) - c, ok := paths[path] - if ok { - return fmt.Errorf("%s at shard %d and %s at shard %d have the same paths: %s", 
-			c.component, c.shard, component, shard, path)
-	}
-
-	paths[path] = pathDescription{shard: shard, component: component}
-	return nil
-}
diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go
deleted file mode 100644
index 495365cf0..000000000
--- a/cmd/frostfs-node/validate_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package main
-
-import (
-	"path/filepath"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestValidate(t *testing.T) {
-	const exampleConfigPrefix = "../../config/"
-	t.Run("examples", func(t *testing.T) {
-		p := filepath.Join(exampleConfigPrefix, "example/node")
-		configtest.ForEachFileType(p, func(c *config.Config) {
-			var err error
-			require.NotPanics(t, func() {
-				err = validateConfig(c)
-			})
-			require.NoError(t, err)
-		})
-	})
-}
diff --git a/cmd/frostfs-node/worker.go b/cmd/frostfs-node/worker.go
deleted file mode 100644
index c5649073b..000000000
--- a/cmd/frostfs-node/worker.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package main
-
-import (
-	"context"
-)
-
-type worker struct {
-	name string
-	fn   func(context.Context)
-}
-
-func newWorkerFromFunc(fn func(ctx context.Context)) worker {
-	return worker{
-		fn: fn,
-	}
-}
-
-func startWorkers(ctx context.Context, c *cfg) {
-	for _, wrk := range c.workers {
-		startWorker(ctx, c, wrk)
-	}
-}
-
-func startWorker(ctx context.Context, c *cfg, wrk worker) {
-	c.wg.Add(1)
-
-	go func(w worker) {
-		w.fn(ctx)
-		c.wg.Done()
-	}(wrk)
-}
-
-func getWorker(c *cfg, name string) *worker {
-	for _, wrk := range c.workers {
-		if wrk.name == name {
-			return &wrk
-		}
-	}
-	return nil
-}
diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go
deleted file mode 100644
index e5a35ab71..000000000
--- a/cmd/internal/common/ape/commands.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package ape
-
-import (
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-	"github.com/nspcc-dev/neo-go/cli/input"
-	"github.com/spf13/cobra"
-)
-
-const (
-	defaultNamespace = "root"
-	namespaceTarget  = "namespace"
-	containerTarget  = "container"
-	userTarget       = "user"
-	groupTarget      = "group"
-
-	Ingress = "ingress"
-	S3      = "s3"
-)
-
-var mChainName = map[string]apechain.Name{
-	Ingress: apechain.Ingress,
-	S3:      apechain.S3,
-}
-
-var (
-	errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
-	errUnknownTargetType              = errors.New("unknown target type")
-	errUnsupportedChainName           = errors.New("unsupported chain name")
-)
-
-// PrintHumanReadableAPEChain prints APE chain rules.
-func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { - cmd.Println("Chain ID: " + string(chain.ID)) - cmd.Printf(" HEX: %x\n", chain.ID) - cmd.Println("Rules:") - for _, rule := range chain.Rules { - cmd.Println("\n\tStatus: " + rule.Status.String()) - cmd.Println("\tAny: " + strconv.FormatBool(rule.Any)) - cmd.Println("\tConditions:") - for _, c := range rule.Condition { - var ot string - switch c.Kind { - case apechain.KindResource: - ot = "Resource" - case apechain.KindRequest: - ot = "Request" - default: - panic("unknown object type") - } - cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value)) - } - cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted)) - for _, name := range rule.Actions.Names { - cmd.Println("\t\t" + name) - } - cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted)) - for _, name := range rule.Resources.Names { - cmd.Println("\t\t" + name) - } - } -} - -// ParseTarget handles target parsing of an APE chain. -func ParseTarget(cmd *cobra.Command) engine.Target { - typ := ParseTargetType(cmd) - name, _ := cmd.Flags().GetString(TargetNameFlag) - switch typ { - case engine.Namespace: - if name == "" { - ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace)) - commonCmd.ExitOnErr(cmd, "read line error: %w", err) - ln = strings.ToLower(ln) - if len(ln) > 0 && (ln[0] == 'n') { - commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined) - } - name = defaultNamespace - } - return engine.NamespaceTarget(name) - case engine.Container: - var cnr cid.ID - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) - return engine.ContainerTarget(name) - case engine.User: - return engine.UserTarget(name) - case engine.Group: - return engine.GroupTarget(name) - default: - commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) - } - panic("unreachable") -} - -// ParseTargetType handles target type parsing of an APE chain. -func ParseTargetType(cmd *cobra.Command) engine.TargetType { - typ, _ := cmd.Flags().GetString(TargetTypeFlag) - switch typ { - case namespaceTarget: - return engine.Namespace - case containerTarget: - return engine.Container - case userTarget: - return engine.User - case groupTarget: - return engine.Group - default: - commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType) - } - panic("unreachable") -} - -// ParseChainID handles the parsing of APE-chain identifier. -// For some subcommands, chain ID is optional as an input parameter and should be generated by -// the service instead. -func ParseChainID(cmd *cobra.Command) (id apechain.ID) { - chainID, _ := cmd.Flags().GetString(ChainIDFlag) - id = apechain.ID(chainID) - - hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag) - if !hexEncoded { - return - } - - chainIDRaw, err := hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - id = apechain.ID(chainIDRaw) - return -} - -// ParseChain parses an APE chain which can be provided either as a rule statement -// or loaded from a binary/JSON file path. 
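-// When both flags are set, the rule statement takes precedence over the file path.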
-func ParseChain(cmd *cobra.Command) *apechain.Chain {
-	chain := new(apechain.Chain)
-	chain.ID = ParseChainID(cmd)
-
-	if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 {
-		commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules))
-	} else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" {
-		commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath))
-	} else {
-		commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
-	}
-
-	cmd.Println("Parsed chain:")
-	PrintHumanReadableAPEChain(cmd, chain)
-
-	return chain
-}
-
-// ParseChainName parses the chain name: the place in the request lifecycle where the policy is applied.
-func ParseChainName(cmd *cobra.Command) apechain.Name {
-	chainName, _ := cmd.Flags().GetString(ChainNameFlag)
-	apeChainName, ok := mChainName[strings.ToLower(chainName)]
-	if !ok {
-		commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
-	}
-	return apeChainName
-}
diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go
deleted file mode 100644
index d8b2e88a2..000000000
--- a/cmd/internal/common/ape/flags.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package ape
-
-const (
-	RuleFlag           = "rule"
-	PathFlag           = "path"
-	PathFlagDesc       = "Path to encoded chain in JSON or binary format"
-	TargetNameFlag     = "target-name"
-	TargetNameFlagDesc = "Resource name in APE resource name format"
-	TargetTypeFlag     = "target-type"
-	TargetTypeFlagDesc = "Resource type (container/namespace)"
-	ChainIDFlag        = "chain-id"
-	ChainIDFlagDesc    = "Chain ID"
-	ChainIDHexFlag     = "chain-id-hex"
-	ChainIDHexFlagDesc = "Flag to parse chain ID as hex"
-	ChainNameFlag      = "chain-name"
-	ChainNameFlagDesc  = "Chain name (ingress|s3)"
-	AllFlag            = "all"
-)
-
-const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
-  <status>[:status_detail] <action>... <condition>... <resource>...
-
-Status:
-  - allow                   Permits specified actions
-  - deny                    Prohibits specified actions
-  - deny:QuotaLimitReached  Denies access due to quota limits
-
-Actions:
-  Object operations:
-    - Object.Put, Object.Get, etc.
-    - Object.* (all object operations)
-  Container operations:
-    - Container.Put, Container.Get, etc.
-    - Container.* (all container operations)
-
-Conditions:
-  ResourceCondition:
-    Format: ResourceCondition:"key"=value, "key"!=value
-    Reserved properties (use '\' before '$'):
-      - $Object:version
-      - $Object:objectID
-      - $Object:containerID
-      - $Object:ownerID
-      - $Object:creationEpoch
-      - $Object:payloadLength
-      - $Object:payloadHash
-      - $Object:objectType
-      - $Object:homomorphicHash
-
-  RequestCondition:
-    Format: RequestCondition:"key"=value, "key"!=value
-    Reserved properties (use '\' before '$'):
-      - $Actor:publicKey
-      - $Actor:role
-
-    Example:
-      ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others
-
-Resources:
-  For objects:
-    - namespace/cid/oid (specific object)
-    - namespace/cid/* (all objects in container)
-    - namespace/* (all objects in namespace)
-    - * (all objects)
-    - /* (all objects in root namespace)
-    - /cid/* (all objects in root container)
-    - /cid/oid (specific object in root container)
-
-  For containers:
-    - namespace/cid (specific container)
-    - namespace/* (all containers in namespace)
-    - * (all containers)
-    - /cid (root container)
-    - /* (all root containers)
-
-Notes:
-  - Cannot mix object and container operations in one rule
-  - Default behavior is Any=false unless 'any' is specified
-  - Use 'all' keyword to explicitly set Any=false`
diff --git a/cmd/internal/common/config/opts.go b/cmd/internal/common/config/opts.go
deleted file mode 100644
index 46e565639..000000000
--- a/cmd/internal/common/config/opts.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package config
-
-import "github.com/spf13/viper"
-
-type opts struct {
-	path      string
-	configDir string
-	envPrefix string
-	v         *viper.Viper
-}
-
-func defaultOpts() *opts {
-	return new(opts)
-}
-
-// Option allows setting an optional parameter of the Config.
-type Option func(*opts)
-
-// WithConfigFile returns an option to set the system path
-// to the configuration file.
-func WithConfigFile(path string) Option {
-	return func(o *opts) {
-		o.path = path
-	}
-}
-
-// WithConfigDir returns an option to set the system path
-// to the directory with configuration files.
-func WithConfigDir(path string) Option {
-	return func(o *opts) {
-		o.configDir = path
-	}
-}
-
-// WithEnvPrefix returns an option that defines
-// the prefix that ENVIRONMENT variables will use.
-func WithEnvPrefix(envPrefix string) Option {
-	return func(o *opts) {
-		o.envPrefix = envPrefix
-	}
-}
-
-// WithViper returns an option that defines
-// a predefined viper.Viper.
-func WithViper(v *viper.Viper) Option {
-	return func(o *opts) {
-		o.v = v
-	}
-}
diff --git a/cmd/internal/common/config/viper.go b/cmd/internal/common/config/viper.go
deleted file mode 100644
index f06d407c1..000000000
--- a/cmd/internal/common/config/viper.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package config
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
-	"github.com/spf13/viper"
-)
-
-const (
-	Separator = "."
-
-	// EnvSeparator is a section separator in ENV variables.
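-	// For example, with the prefix FROSTFS_IR, the key "morph.dial_timeout"
-	// maps to the variable FROSTFS_IR_MORPH_DIAL_TIMEOUT.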
-	EnvSeparator = "_"
-)
-
-var errProvideViperInOpts = errors.New("provide viper in opts")
-
-func CreateViper(opts ...Option) (*viper.Viper, error) {
-	o := defaultOpts()
-	for i := range opts {
-		opts[i](o)
-	}
-
-	var v *viper.Viper
-	if o.v != nil {
-		v = o.v
-	} else {
-		v = viper.New()
-	}
-
-	if o.envPrefix != "" {
-		v.SetEnvPrefix(o.envPrefix)
-		v.AutomaticEnv()
-		v.SetEnvKeyReplacer(strings.NewReplacer(Separator, EnvSeparator))
-	}
-
-	if o.path != "" {
-		v.SetConfigFile(o.path)
-
-		err := v.ReadInConfig()
-		if err != nil {
-			return nil, fmt.Errorf("failed to read config: %w", err)
-		}
-	}
-
-	if o.configDir != "" {
-		if err := config.ReadConfigDir(v, o.configDir); err != nil {
-			return nil, fmt.Errorf("failed to read config dir: %w", err)
-		}
-	}
-
-	return v, nil
-}
-
-func ReloadViper(opts ...Option) error {
-	o := defaultOpts()
-	for i := range opts {
-		opts[i](o)
-	}
-
-	if o.v == nil {
-		return errProvideViperInOpts
-	}
-
-	if o.path != "" {
-		err := o.v.ReadInConfig()
-		if err != nil {
-			return fmt.Errorf("rereading configuration file: %w", err)
-		}
-	}
-
-	if o.configDir != "" {
-		if err := config.ReadConfigDir(o.v, o.configDir); err != nil {
-			return fmt.Errorf("rereading configuration dir: %w", err)
-		}
-	}
-
-	return nil
-}
diff --git a/cmd/internal/common/config/viper_test.go b/cmd/internal/common/config/viper_test.go
deleted file mode 100644
index d533a15c2..000000000
--- a/cmd/internal/common/config/viper_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package config_test
-
-import (
-	"encoding/json"
-	"os"
-	"path"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config/test"
-	"github.com/spf13/viper"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"gopkg.in/yaml.v3"
-)
-
-func TestCreateReloadViper(t *testing.T) {
-	type m = map[string]any
-
-	dummyFileSize := 1 << 10
-
-	configPath := t.TempDir()
-	configFile := "000_a.yaml"
-
-	configDirPath := path.Join(configPath, "conf.d")
-	require.NoError(t, os.Mkdir(configDirPath, 0o700))
-
-	configtest.PrepareConfigFiles(t, configPath, []configtest.ConfigFile{
-		configtest.NewConfigFile(configFile, m{"a": "000"}, yaml.Marshal),
-	})
-
-	// Invalid configs: dummy files that appear lexicographically first.
-	configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
-		configtest.NewDummyFile("000_file_1", dummyFileSize),
-		configtest.NewDummyFile("000_file_2", dummyFileSize),
-		configtest.NewDummyFile("000_file_3", dummyFileSize),
-	})
-
-	configtest.PrepareConfigFiles(t, configDirPath, []configtest.ConfigFile{
-		// Valid configs with invalid extensions that appear lexicographically first.
-		configtest.NewConfigFile("001_a.yaml.un~", m{"a": "101"}, yaml.Marshal),
-		configtest.NewConfigFile("001_b.yml~", m{"b": m{"a": "102", "b": "103"}}, yaml.Marshal),
-		configtest.NewConfigFile("001_c.yaml.swp", m{"c": m{"a": "104", "b": "105"}}, yaml.Marshal),
-		configtest.NewConfigFile("001_d.json.swp", m{"d": m{"a": "106", "b": "107"}}, json.Marshal),
-
-		// Valid configs with valid extensions that should be loaded.
-		configtest.NewConfigFile("010_a.yaml", m{"a": "1"}, yaml.Marshal),
-		configtest.NewConfigFile("020_b.yml", m{"b": m{"a": "2", "b": "3"}}, yaml.Marshal),
-		configtest.NewConfigFile("030_c.json", m{"c": m{"a": "4", "b": "5"}}, json.Marshal),
-
-		// Valid configs with invalid extensions that appear lexicographically last.
-		configtest.NewConfigFile("099_a.yaml.un~", m{"a": "201"}, yaml.Marshal),
-		configtest.NewConfigFile("099_b.yml~", m{"b": m{"a": "202", "b": "203"}}, yaml.Marshal),
-		configtest.NewConfigFile("099_c.yaml.swp", m{"c": m{"a": "204", "b": "205"}}, yaml.Marshal),
-		configtest.NewConfigFile("099_c.json.swp", m{"d": m{"a": "206", "b": "207"}}, json.Marshal),
-	})
-
-	// Invalid configs: dummy files that appear lexicographically last.
-	configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
-		configtest.NewDummyFile("999_file_1", dummyFileSize),
-		configtest.NewDummyFile("999_file_2", dummyFileSize),
-		configtest.NewDummyFile("999_file_3", dummyFileSize),
-	})
-
-	finalConfig := m{"a": "1", "b": m{"a": "2", "b": "3"}, "c": m{"a": "4", "b": "5"}}
-
-	var (
-		v   *viper.Viper
-		err error
-	)
-
-	t.Run("create config with config dir only", func(t *testing.T) {
-		v, err = config.CreateViper(
-			config.WithConfigDir(configDirPath),
-		)
-		require.NoError(t, err)
-		assert.Equal(t, finalConfig, v.AllSettings())
-	})
-
-	t.Run("reload config with config dir only", func(t *testing.T) {
-		err = config.ReloadViper(
-			config.WithViper(v),
-			config.WithConfigDir(configDirPath),
-		)
-		require.NoError(t, err)
-		assert.Equal(t, finalConfig, v.AllSettings())
-	})
-
-	t.Run("create config with both config and config dir", func(t *testing.T) {
-		v, err = config.CreateViper(
-			config.WithConfigFile(path.Join(configPath, configFile)),
-			config.WithConfigDir(configDirPath),
-		)
-		require.NoError(t, err)
-		assert.Equal(t, finalConfig, v.AllSettings())
-	})
-
-	t.Run("reload config with both config and config dir", func(t *testing.T) {
-		err = config.ReloadViper(
-			config.WithViper(v),
-			config.WithConfigFile(path.Join(configPath, configFile)),
-			config.WithConfigDir(configDirPath),
-		)
-		require.NoError(t, err)
-		assert.Equal(t, finalConfig, v.AllSettings())
-	})
-}
diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go
deleted file mode 100644
index 13f447af4..000000000
--- a/cmd/internal/common/exit.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package common
-
-import (
-	"errors"
-	"fmt"
-	"os"
-
-	sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	"github.com/spf13/cobra"
-)
-
-// ExitOnErr prints the error and exits with a code that matches
-// one of the common errors from the SDK library. If no match is
-// found, exits with code 1.
-// Does nothing if the passed error is nil.
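-// Before exiting, it runs the PersistentPostRun hooks up the command
-// tree (see below), since os.Exit skips normal teardown.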
-func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
-	if err == nil {
-		return
-	}
-
-	if errFmt != "" {
-		err = fmt.Errorf(errFmt, err)
-	}
-
-	const (
-		_ = iota
-		internal
-		aclDenied
-		apemanagerDenied
-	)
-
-	var (
-		code int
-
-		internalErr   = new(sdkstatus.ServerInternal)
-		accessErr     = new(sdkstatus.ObjectAccessDenied)
-		apemanagerErr = new(sdkstatus.APEManagerAccessDenied)
-	)
-
-	switch {
-	case errors.As(err, &internalErr):
-		code = internal
-	case errors.As(err, &accessErr):
-		code = aclDenied
-		err = fmt.Errorf("%w: %s", err, accessErr.Reason())
-	case errors.As(err, &apemanagerErr):
-		code = apemanagerDenied
-		err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason())
-	default:
-		code = internal
-	}
-
-	cmd.PrintErrln(err)
-	for p := cmd; p != nil; p = p.Parent() {
-		if p.PersistentPostRun != nil {
-			p.PersistentPostRun(cmd, nil)
-			if !cobra.EnableTraverseRunHooks {
-				break
-			}
-		}
-	}
-	os.Exit(code)
-}
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
deleted file mode 100644
index 5dd1a060e..000000000
--- a/cmd/internal/common/netmap.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package common
-
-import (
-	"encoding/hex"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/spf13/cobra"
-)
-
-// PrettyPrintNodeInfo prints information about a network node with the given indent and index.
-// To avoid printing the attribute list, use the short parameter.
-func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
-	index int, indent string, short bool,
-) {
-	var strState string
-
-	switch node.Status() {
-	default:
-		strState = "STATE_UNSUPPORTED"
-	case netmap.Online:
-		strState = "ONLINE"
-	case netmap.Offline:
-		strState = "OFFLINE"
-	case netmap.Maintenance:
-		strState = "MAINTENANCE"
-	}
-
-	cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState)
-
-	for endpoint := range node.NetworkEndpoints() {
-		cmd.Printf("%s ", endpoint)
-	}
-	cmd.Println()
-
-	if !short {
-		for key, value := range node.Attributes() {
-			cmd.Printf("%s\t%s: %s\n", indent, key, value)
-		}
-	}
-}
-
-// PrettyPrintNetMap prints information about the network map.
-func PrettyPrintNetMap(cmd *cobra.Command, nm netmap.NetMap, short bool) {
-	cmd.Println("Epoch:", nm.Epoch())
-
-	nodes := nm.Nodes()
-	for i := range nodes {
-		PrettyPrintNodeInfo(cmd, nodes[i], i, "", short)
-	}
-}
diff --git a/config/example/README.md b/config/example/README.md
deleted file mode 100644
index 8dd0fe44a..000000000
--- a/config/example/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Examples of correct configuration file structures
-
-Here are files in all supported formats and with all possible configuration values
-of FrostFS applications. See [node.yaml](node.yaml) for configuration notes.
-
-All parameters are correct; however, they are for informational purposes only.
-It is not recommended to use these configs for real application launches.
-
-## Config files
-
-- Storage node
-  - JSON: `node.json`
-  - YAML: `node.yaml`
-- Inner ring
-  - YAML: `ir.yaml`
-- CLI
-  - YAML: `cli.yaml`
-
-### Multiple configs
-
-You can split your configuration into several files.
-For example, you can use a separate YAML file for each shard or each service (pprof, prometheus).
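-
-A minimal sketch of the merge behavior described below, assuming the standard
-`spf13/viper` API (the file names are the hypothetical examples used further
-down):
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/spf13/viper"
-)
-
-func main() {
-	v := viper.New()
-
-	// Base config passed via --config.
-	v.SetConfigFile("00.yaml")
-	_ = v.ReadInConfig()
-
-	// Files from --config-dir, sorted alphanumerically; keys from later
-	// files override keys set by earlier ones.
-	for _, f := range []string{"dir/01.yaml", "dir/02.yaml"} {
-		v.SetConfigFile(f)
-		_ = v.MergeInConfig()
-	}
-
-	fmt.Println(v.GetString("logger.level")) // "warn" with the examples below
-}
-```
-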
-You must use the `--config-dir` flag to process several configs:
-
-```shell
-$ ./bin/frostfs-node --config ./config/example/node.yaml --config-dir ./dir/with/additional/configs
-```
-
-When the `--config-dir` flag is set, the application:
-* reads all `*.y[a]ml` files from the provided directory,
-* uses Viper's [MergeConfig](https://pkg.go.dev/github.com/spf13/viper#MergeConfig) functionality to produce the final configuration,
-* processes the files in alphanumeric order, so that `01.yaml` may be extended with the contents of `02.yaml`;
-if a field is specified in multiple files, the latest occurrence takes effect.
-
-So if we have the following files:
-```yaml
-# 00.yaml
-logger:
-  level: debug
-pprof:
-  enabled: true
-  address: localhost:6060
-prometheus:
-  enabled: true
-  address: localhost:9090
-```
-
-```yaml
-# dir/01.yaml
-logger:
-  level: info
-pprof:
-  enabled: false
-```
-
-```yaml
-# dir/02.yaml
-logger:
-  level: warn
-prometheus:
-  address: localhost:9091
-```
-
-and provide the following flags:
-```shell
-$ ./bin/frostfs-node --config 00.yaml --config-dir dir
-```
-
-the resulting config will be:
-```yaml
-logger:
-  level: warn
-pprof:
-  enabled: false
-  address: localhost:6060
-prometheus:
-  enabled: true
-  address: localhost:9091
-```
-
-## Environment variables
-
-- Storage node: `node.env`
-- Inner ring: `ir.env`
diff --git a/config/example/cli.yaml b/config/example/cli.yaml
deleted file mode 100644
index 0f8794bfd..000000000
--- a/config/example/cli.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-wallet: wallets/wallet.json # path to the NEP-6 wallet file, path to a private key file, or WIF
-address: Nf5YDCHrrtFCbetGT8TF1kwH1SjnJjT9N1 # account address in the NEP-6 wallet, omit for default address
-password: secret # account password, use "" for empty password
-rpc-endpoint: s01.frostfs.devenv:8080 # FrostFS API endpoint of FrostFS node
-endpoint: localhost:8090 # Control API endpoint of FrostFS node
diff --git a/config/example/ir-control.yaml b/config/example/ir-control.yaml
deleted file mode 100644
index a5cbafc49..000000000
--- a/config/example/ir-control.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-endpoint: 'localhost:8090'
-wallet: '/etc/frostfs/ir/wallet.json'
-password: ''
-#ir: true
diff --git a/config/example/ir.env b/config/example/ir.env
deleted file mode 100644
index c13044a6e..000000000
--- a/config/example/ir.env
+++ /dev/null
@@ -1,93 +0,0 @@
-FROSTFS_IR_LOGGER_LEVEL=info
-FROSTFS_IR_LOGGER_TIMESTAMP=true
-FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
-FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
-
-FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
-FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
-FROSTFS_IR_WALLET_PASSWORD=secret
-
-FROSTFS_IR_WITHOUT_MAINNET=false
-
-FROSTFS_IR_MORPH_DIAL_TIMEOUT=5s
-FROSTFS_IR_MORPH_ENDPOINT_CLIENT_0_ADDRESS="wss://sidechain1.fs.neo.org:30333/ws"
-FROSTFS_IR_MORPH_ENDPOINT_CLIENT_1_ADDRESS="wss://sidechain2.fs.neo.org:30333/ws"
-FROSTFS_IR_MORPH_VALIDATORS="0283120f4c8c1fc1d792af5063d2def9da5fddc90bc1384de7fcfdda33c3860170"
-FROSTFS_IR_MORPH_SWITCH_INTERVAL=2m
-
-FROSTFS_IR_MAINNET_DIAL_TIMEOUT=5s
-FROSTFS_IR_MAINNET_ENDPOINT_CLIENT_0_ADDRESS="wss://mainchain1.fs.neo.org:30333/ws"
-FROSTFS_IR_MAINNET_ENDPOINT_CLIENT_1_ADDRESS="wss://mainchain2.fs.neo.org:30333/ws"
-FROSTFS_IR_MAINNET_SWITCH_INTERVAL=2m
-
-FROSTFS_IR_CONTROL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
-FROSTFS_IR_CONTROL_GRPC_ENDPOINT=localhost:8090
-
-FROSTFS_IR_GOVERNANCE_DISABLE=false - -FROSTFS_IR_NODE_PERSISTENT_STATE_PATH=.frostfs-ir-state - -FROSTFS_IR_LOCODE_DB_PATH=/path/to/locode.db - -FROSTFS_IR_FEE_MAIN_CHAIN=50000000 -FROSTFS_IR_FEE_SIDE_CHAIN=200000000 - -FROSTFS_IR_TIMERS_EMIT=240 -FROSTFS_IR_TIMERS_STOP_ESTIMATION_MUL=1 -FROSTFS_IR_TIMERS_STOP_ESTIMATION_DIV=4 -FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_MUL=1 -FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_DIV=2 -FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_MUL=3 -FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_DIV=4 - -FROSTFS_IR_EMIT_STORAGE_AMOUNT=800000000 -FROSTFS_IR_EMIT_MINT_VALUE=20000000 -FROSTFS_IR_EMIT_MINT_CACHE_SIZE=1000 -FROSTFS_IR_EMIT_MINT_CACHE_THRESHOLD=1 -FROSTFS_IR_EMIT_GAS_BALANCE_THRESHOLD=100000000000 - -FROSTFS_IR_WORKERS_ALPHABET=10 -FROSTFS_IR_WORKERS_BALANCE=10 -FROSTFS_IR_WORKERS_CONTAINER=10 -FROSTFS_IR_WORKERS_NEOFS=10 -FROSTFS_IR_WORKERS_NETMAP=10 - -FROSTFS_IR_INDEXER_CACHE_TIMEOUT=15s - -FROSTFS_IR_NETMAP_CLEANER_ENABLED=true -FROSTFS_IR_NETMAP_CLEANER_THRESHOLD=3 - -FROSTFS_IR_CONTRACTS_NEOFS=ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 -FROSTFS_IR_CONTRACTS_PROCESSING=597f5894867113a41e192801709c02497f611de8 -FROSTFS_IR_CONTRACTS_BALANCE=d2aa48d14b17b11bc4c68205027884a96706dd16 -FROSTFS_IR_CONTRACTS_CONTAINER=ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 -FROSTFS_IR_CONTRACTS_NEOFSID=9f5866decbc751a099e74c7c7bc89f609201755a -FROSTFS_IR_CONTRACTS_NETMAP=83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 -FROSTFS_IR_CONTRACTS_PROXY=abc8794bb40a21f2db5f21ae62741eb46c8cad1c -FROSTFS_IR_CONTRACTS_ALPHABET_AMOUNT=7 -FROSTFS_IR_CONTRACTS_ALPHABET_AZ=c1d211fceeb4b1dc76b8e4054d11fdf887e418ea -FROSTFS_IR_CONTRACTS_ALPHABET_BUKY=e2ba789320899658b100f331bdebb74474757920 -FROSTFS_IR_CONTRACTS_ALPHABET_VEDI=9623c11fd49aa18220458fbaae0396749c669e19 -FROSTFS_IR_CONTRACTS_ALPHABET_GLAGOLI=39617441b8e06c55e4fc3ce660be9101b6c1f866 -FROSTFS_IR_CONTRACTS_ALPHABET_DOBRO=e6122b65d45c8feeb04455d67814394c147ed4d1 -FROSTFS_IR_CONTRACTS_ALPHABET_YEST=cdbca5cb5d48a4472923844d0e3ee6328cf86d38 -FROSTFS_IR_CONTRACTS_ALPHABET_ZHIVETE=f584699bc2ff457d339fb09f16217042c1a42101 - -FROSTFS_IR_PPROF_ENABLED=true -FROSTFS_IR_PPROF_ADDRESS=localhost:6060 -FROSTFS_IR_PPROF_SHUTDOWN_TIMEOUT=30s -FROSTFS_IR_PPROF_BLOCK_RATE=10000 -FROSTFS_IR_PPROF_MUTEX_RATE=10000 - -FROSTFS_IR_PROMETHEUS_ENABLED=true -FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090 -FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s - -FROSTFS_MULTINET_ENABLED=true -FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" -FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" -FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" -FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" -FROSTFS_MULTINET_BALANCER=roundrobin -FROSTFS_MULTINET_RESTRICT=false -FROSTFS_MULTINET_FALLBACK_DELAY=350ms diff --git a/config/example/ir.yaml b/config/example/ir.yaml deleted file mode 100644 index ed53f014b..000000000 --- a/config/example/ir.yaml +++ /dev/null @@ -1,143 +0,0 @@ ---- - -logger: - level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - timestamp: true - tags: - - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. 
- level: debug - -wallet: - path: /path/to/wallet.json # Path to NEP-6 NEO wallet file - address: NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX # Account address in the wallet; ignore to use default address - password: secret # Account password in the wallet - -without_mainnet: false # Run application in single chain environment without mainchain - -morph: - dial_timeout: 5s # Timeout for RPC client connection to sidechain - endpoint: - client: # List of websocket RPC endpoints in sidechain - - address: wss://sidechain1.fs.neo.org:30333/ws - - address: wss://sidechain2.fs.neo.org:30333/ws - validators: # List of hex-encoded 33-byte public keys of sidechain validators to vote for at application startup - - 0283120f4c8c1fc1d792af5063d2def9da5fddc90bc1384de7fcfdda33c3860170 - switch_interval: 2m # interval b/w RPC switch attempts if the node is not connected to the highest priority node - -mainnet: - dial_timeout: 5s # Timeout for RPC client connection to mainchain; ignore if mainchain is disabled - switch_interval: 2m # interval b/w RPC switch attempts if the node is not connected to the highest priority node - endpoint: - client: # List of websocket RPC endpoints in mainchain; ignore if mainchain is disabled - - address: wss://mainchain1.fs.neo.org:30333/ws - - address: wss://mainchain.fs.neo.org:30333/ws - -control: - authorized_keys: # List of hex-encoded 33-byte public keys that have rights to use the control service - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 - grpc: - endpoint: localhost:8090 # Endpoint that is listened by the control service; disabled by default - -governance: - disable: false # Disable synchronization of sidechain committee and mainchain role management contract; ignore if mainchain is disabled - -node: - persistent_state: - path: .frostfs-ir-state # Path to application state file - -locode: - db: - path: /path/to/locode.db # Path to UN/LOCODE database file - -fee: - main_chain: 50000000 # Fixed8 value of extra GAS fee for mainchain contract invocation; ignore if notary is enabled in mainchain - side_chain: 200000000 # Fixed8 value of extra GAS fee for sidechain contract invocation; ignore if notary is enabled in sidechain - -timers: - emit: 240 # Number of sidechain blocks between GAS emission cycles; disabled by default - stop_estimation: - mul: 1 # Multiplier in x/y relation of when to stop basic income estimation within the epoch - div: 4 # Divider in x/y relation of when to stop basic income estimation within the epoch - collect_basic_income: - mul: 1 # Multiplier in x/y relation of when to start basic income asset collection within the epoch - div: 2 # Divider in x/y relation of when to start basic income asset collecting within the epoch - distribute_basic_income: - mul: 3 # Multiplier in x/y relation of when to start basic income asset distribution within the epoch - div: 4 # Divider in x/y relation of when to start basic income asset distribution within the epoch - -emit: - storage: - amount: 800000000 # Fixed8 value of sidechain GAS emitted to all storage nodes once per GAS emission cycle; disabled by default - mint: - value: 20000000 # Fixed8 value of sidechain GAS transferred to account that received a deposit from mainchain - cache_size: 1000 # LRU cache size of all deposit receivers to avoid double GAS emission - threshold: 1 # Lifetime of records in LRU cache of all deposit receivers in FrostFS epochs - gas: - balance_threshold: 100000000000 # Fixed8 value of inner 
ring wallet balance threshold when GAS emission for deposit receivers is disabled; disabled by default - extra_wallets: # wallet addresses that are included in gas emission process in equal share with network map nodes - - "NQcfMqU6pfXFwaaBN6KHcTpT63eMtzk6eH" - - "NaSVC4xKySQBpKr1XRVYFCHjLhuYXnMBrP" - - "NT9jL5XcxcDt2iTj67o2d5xNfDxquN3pPk" - -workers: - alphabet: 10 # Number of workers to process events from alphabet contract in parallel - balance: 10 # Number of workers to process events from balance contract in parallel - container: 10 # Number of workers to process events from container contract in parallel - frostfs: 10 # Number of workers to process events from frostfs contracts in parallel - netmap: 10 # Number of workers to process events from netmap contract in parallel - -indexer: - cache_timeout: 15s # Duration between internal state update about current list of inner ring nodes - -netmap_cleaner: - enabled: true # Enable voting for removing stale storage nodes from network map - threshold: 3 # Number of FrostFS epoch without bootstrap request from storage node before it considered stale - -contracts: - frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 # Address of FrostFS contract in mainchain; ignore if mainchain is disabled - processing: 597f5894867113a41e192801709c02497f611de8 # Address of processing contract in mainchain; ignore if mainchain is disabled or notary is disabled in mainchain - balance: d2aa48d14b17b11bc4c68205027884a96706dd16 # Optional: override address of balance contract in sidechain - container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 # Optional: override address of container contract in sidechain - frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a # Optional: override address of frostfsid contract in sidechain - netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 # Optional: override address of netmap contract in sidechain - proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c # Optional: override address of proxy contract in sidechain; ignore if notary is disabled in sidechain - alphabet: - amount: 7 # Optional: override amount of alphabet contracts - az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea # Optional: override address of az alphabet contract in sidechain - buky: e2ba789320899658b100f331bdebb74474757920 # Optional: override address of buky alphabet contract in sidechain - vedi: 9623c11fd49aa18220458fbaae0396749c669e19 # Optional: override address of vedi alphabet contract in sidechain - glagoli: 39617441b8e06c55e4fc3ce660be9101b6c1f866 # Optional: override address of glagoli alphabet contract in sidechain - dobro: e6122b65d45c8feeb04455d67814394c147ed4d1 # Optional: override address of dobro alphabet contract in sidechain - yest: cdbca5cb5d48a4472923844d0e3ee6328cf86d38 # Optional: override address of yest contract in sidechain - zhivete: f584699bc2ff457d339fb09f16217042c1a42101 # Optional: override address of zhivete contract in sidechain - -pprof: - enabled: true - address: localhost:6060 # Endpoint for application pprof profiling; disabled by default - shutdown_timeout: 30s # Timeout for profiling HTTP server graceful shutdown - block_rate: 10000 # sampling rate: an average of one blocking event per rate nanoseconds spent blocked is reported; "1" reports every blocking event; "0" disables profiler - mutex_rate: 10000 # sampling rate: on average 1/rate events are reported; "0" disables profiler - -prometheus: - enabled: true - address: localhost:9090 # Endpoint for application prometheus metrics; disabled by default - shutdown_timeout: 30s # Timeout for 
metrics HTTP server graceful shutdown - -systemdnotify: - enabled: true - -multinet: - enabled: true - subnets: - - mask: 192.168.219.174/24 - source_ips: - - 192.168.218.185 - - 192.168.219.185 - - mask: 10.78.70.74/24 - source_ips: - - 10.78.70.185 - - 10.78.71.185 - balancer: roundrobin - restrict: false - fallback_delay: 350ms diff --git a/config/example/node-control.yaml b/config/example/node-control.yaml deleted file mode 100644 index 2e32b2d96..000000000 --- a/config/example/node-control.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -endpoint: 'localhost:8091' -wallet: '/etc/frostfs/storage/wallet.json' -password: '' diff --git a/config/example/node.env b/config/example/node.env deleted file mode 100644 index 9a2426358..000000000 --- a/config/example/node.env +++ /dev/null @@ -1,283 +0,0 @@ -FROSTFS_LOGGER_LEVEL=debug -FROSTFS_LOGGER_DESTINATION=journald -FROSTFS_LOGGER_TIMESTAMP=true -FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" -FROSTFS_LOGGER_TAGS_0_LEVEL="debug" - -FROSTFS_PPROF_ENABLED=true -FROSTFS_PPROF_ADDRESS=localhost:6060 -FROSTFS_PPROF_SHUTDOWN_TIMEOUT=15s -FROSTFS_PPROF_BLOCK_RATE=10000 -FROSTFS_PPROF_MUTEX_RATE=10000 - -FROSTFS_PROMETHEUS_ENABLED=true -FROSTFS_PROMETHEUS_ADDRESS=localhost:9090 -FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT=15s - -# Node section -FROSTFS_NODE_KEY=./wallet.key -FROSTFS_NODE_WALLET_PATH=./wallet.json -FROSTFS_NODE_WALLET_ADDRESS=NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz -FROSTFS_NODE_WALLET_PASSWORD=password -FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" -FROSTFS_NODE_ATTRIBUTE_0=Price:11 -FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" -FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions -FROSTFS_NODE_PERSISTENT_STATE_PATH=/state -FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db - -# Tree service section -FROSTFS_TREE_ENABLED=true -FROSTFS_TREE_CACHE_SIZE=15 -FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32 -FROSTFS_TREE_REPLICATION_WORKER_COUNT=32 -FROSTFS_TREE_REPLICATION_TIMEOUT=5s -FROSTFS_TREE_SYNC_INTERVAL=1h -FROSTFS_TREE_SYNC_BATCH_SIZE=2000 -FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" - -# gRPC section -## 0 server -FROSTFS_GRPC_0_ENDPOINT=s01.frostfs.devenv:8080 -### TLS config -FROSTFS_GRPC_0_TLS_ENABLED=true -FROSTFS_GRPC_0_TLS_CERTIFICATE=/path/to/cert -FROSTFS_GRPC_0_TLS_KEY=/path/to/key - -## 1 server -FROSTFS_GRPC_1_ENDPOINT=s02.frostfs.devenv:8080 -### TLS config -FROSTFS_GRPC_1_TLS_ENABLED=false - -# Control service section -FROSTFS_CONTROL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" -FROSTFS_CONTROL_GRPC_ENDPOINT=localhost:8090 - -# Contracts section -FROSTFS_CONTRACTS_BALANCE=5263abba1abedbf79bb57f3e40b50b4425d2d6cd -FROSTFS_CONTRACTS_CONTAINER=5d084790d7aa36cea7b53fe897380dab11d2cd3c -FROSTFS_CONTRACTS_NETMAP=0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca -FROSTFS_CONTRACTS_PROXY=ad7c6b55b737b696e5c82c85445040964a03e97f - -# Morph chain section -FROSTFS_MORPH_DIAL_TIMEOUT=30s -FROSTFS_MORPH_CACHE_TTL=15s -FROSTFS_MORPH_SWITCH_INTERVAL=3m -FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS="wss://rpc1.morph.frostfs.info:40341/ws" -FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY=0 -FROSTFS_MORPH_RPC_ENDPOINT_0_TRUSTED_CA_LIST="/path/to/ca.pem" -FROSTFS_MORPH_RPC_ENDPOINT_0_CERTIFICATE="/path/to/cert" -FROSTFS_MORPH_RPC_ENDPOINT_0_KEY="/path/to/key" 
-FROSTFS_MORPH_RPC_ENDPOINT_1_ADDRESS="wss://rpc2.morph.frostfs.info:40341/ws" -FROSTFS_MORPH_RPC_ENDPOINT_1_PRIORITY=2 -FROSTFS_MORPH_APE_CHAIN_CACHE_SIZE=100000 - -# API Client section -FROSTFS_APICLIENT_DIAL_TIMEOUT=15s -FROSTFS_APICLIENT_STREAM_TIMEOUT=20s -FROSTFS_APICLIENT_RECONNECT_TIMEOUT=30s -FROSTFS_APICLIENT_ALLOW_EXTERNAL=true - -# Policer section -FROSTFS_POLICER_HEAD_TIMEOUT=15s - -# Replicator section -FROSTFS_REPLICATOR_PUT_TIMEOUT=15s -FROSTFS_REPLICATOR_POOL_SIZE=10 - -# Container service section -FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 - -# Object service section -FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true -FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 -FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" - -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 - -# Storage engine section -FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 -## 0 shard -### Flag to refill Metabase from BlobStor -FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false -FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100 -### Flag to set shard mode -FROSTFS_STORAGE_SHARD_0_MODE=read-only -### Write cache config -FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED=false -FROSTFS_STORAGE_SHARD_0_WRITECACHE_NO_SYNC=true -FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH=tmp/0/cache -FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49 -FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_FLUSHING_OBJECTS_SIZE=100 -### Metabase config -FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta -FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 -FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 -FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms -### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true -FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest -FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" -FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true -FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 -FROSTFS_STORAGE_SHARD_0_SMALL_OBJECT_SIZE=102400 -### Blobovnicza config -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH=tmp/0/blob/blobovnicza -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PERM=0644 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE=blobovnicza -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_TTL=1m -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=30s -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_INIT_WORKER_COUNT=10 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_REBUILD_DROP_TIMEOUT=30s -### FSTree config -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=tmp/0/blob -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PERM=0644 -FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH=5 -### Pilorama config -FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH="tmp/0/blob/pilorama.db" -FROSTFS_STORAGE_SHARD_0_PILORAMA_MAX_BATCH_DELAY=10ms 
-FROSTFS_STORAGE_SHARD_0_PILORAMA_MAX_BATCH_SIZE=200 -### GC config -#### Limit of the single data remover's batching operation in number of objects -FROSTFS_STORAGE_SHARD_0_GC_REMOVER_BATCH_SIZE=150 -#### Sleep interval between data remover tacts -FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m -#### Limit of objects to be marked expired by the garbage collector -FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 -#### Limit of concurrent workers collecting expired objects by the garbage collector -FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 -#### Limits config -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 - -## 1 shard -### Flag to refill Metabase from BlobStor -FROSTFS_STORAGE_SHARD_1_RESYNC_METABASE=true -### Flag to set shard mode -FROSTFS_STORAGE_SHARD_1_MODE=read-write -### Write cache config -FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED=true 
-FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH=tmp/1/cache -FROSTFS_STORAGE_SHARD_1_WRITECACHE_SMALL_OBJECT_SIZE=16384 -FROSTFS_STORAGE_SHARD_1_WRITECACHE_MAX_OBJECT_SIZE=134217728 -FROSTFS_STORAGE_SHARD_1_WRITECACHE_FLUSH_WORKER_COUNT=30 -FROSTFS_STORAGE_SHARD_1_WRITECACHE_CAPACITY=4294967296 -### Metabase config -FROSTFS_STORAGE_SHARD_1_METABASE_PATH=tmp/1/meta -FROSTFS_STORAGE_SHARD_1_METABASE_PERM=0644 -FROSTFS_STORAGE_SHARD_1_METABASE_MAX_BATCH_SIZE=200 -FROSTFS_STORAGE_SHARD_1_METABASE_MAX_BATCH_DELAY=20ms -### Blobstor config -FROSTFS_STORAGE_SHARD_1_COMPRESS=false -FROSTFS_STORAGE_SHARD_1_SMALL_OBJECT_SIZE=102400 -### Blobovnicza config -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE=blobovnicza -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH=tmp/1/blob/blobovnicza -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304 -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1 -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4 -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50 -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_TTL=5m -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=15s -### FSTree config -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PERM=0644 -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_NO_SYNC=true -FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH=5 -### Pilorama config -FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH="tmp/1/blob/pilorama.db" -FROSTFS_STORAGE_SHARD_1_PILORAMA_PERM=0644 -FROSTFS_STORAGE_SHARD_1_PILORAMA_NO_SYNC=true -FROSTFS_STORAGE_SHARD_1_PILORAMA_MAX_BATCH_DELAY=5ms -FROSTFS_STORAGE_SHARD_1_PILORAMA_MAX_BATCH_SIZE=100 -### GC config -#### Limit of the single data remover's batching operation in number of objects -FROSTFS_STORAGE_SHARD_1_GC_REMOVER_BATCH_SIZE=200 -#### Sleep interval between data remover tacts -FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m - -FROSTFS_TRACING_ENABLED=true -FROSTFS_TRACING_ENDPOINT="localhost" -FROSTFS_TRACING_EXPORTER="otlp_grpc" -FROSTFS_TRACING_TRUSTED_CA="" -FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0 -FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value -FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1 -FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value - -FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824 - -# AUDIT section -FROSTFS_AUDIT_ENABLED=true - -# MULTINET section -FROSTFS_MULTINET_ENABLED=true -FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" -FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" -FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" -FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" -FROSTFS_MULTINET_BALANCER=roundrobin -FROSTFS_MULTINET_RESTRICT=false -FROSTFS_MULTINET_FALLBACK_DELAY=350ms - -FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" -FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json deleted file mode 100644 index 6b7a9c2c6..000000000 --- a/config/example/node.json +++ /dev/null @@ -1,429 +0,0 @@ -{ - "logger": { - "level": "debug", - "destination": "journald", - "timestamp": true, - "tags": [ - { - "names": "main, morph", - "level": "debug" - } - ] - }, - "pprof": { - "enabled": true, - "address": "localhost:6060", - "shutdown_timeout": "15s", - "block_rate": 10000, - "mutex_rate": 10000 - }, - "prometheus": { - "enabled": true, - 
"address": "localhost:9090", - "shutdown_timeout": "15s" - }, - "node": { - "key": "./wallet.key", - "wallet": { - "path": "./wallet.json", - "address": "NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz", - "password": "password" - }, - "addresses": [ - "s01.frostfs.devenv:8080", - "/dns4/s02.frostfs.devenv/tcp/8081", - "grpc://127.0.0.1:8082", - "grpcs://localhost:8083" - ], - "attribute_0": "Price:11", - "attribute_1": "UN-LOCODE:RU MSK", - "persistent_sessions": { - "path": "/sessions" - }, - "persistent_state": { - "path": "/state" - }, - "locode_db_path": "/path/to/locode/db" - }, - "grpc": { - "0": { - "endpoint": "s01.frostfs.devenv:8080", - "tls": { - "enabled": true, - "certificate": "/path/to/cert", - "key": "/path/to/key" - } - }, - "1": { - "endpoint": "s02.frostfs.devenv:8080", - "tls": { - "enabled": false - } - }, - "2": { - "endpoint": "s03.frostfs.devenv:8080", - "tls": { - "enabled": true, - "use_insecure_crypto": true - } - } - }, - "tree": { - "enabled": true, - "cache_size": 15, - "replication_channel_capacity": 32, - "replication_worker_count": 32, - "replication_timeout": "5s", - "sync_interval": "1h", - "sync_batch_size": 2000, - "authorized_keys": [ - "0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0", - "02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" - ] - }, - "control": { - "authorized_keys": [ - "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", - "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" - ], - "grpc": { - "endpoint": "localhost:8090" - } - }, - "contracts": { - "balance": "5263abba1abedbf79bb57f3e40b50b4425d2d6cd", - "container": "5d084790d7aa36cea7b53fe897380dab11d2cd3c", - "netmap": "0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca", - "proxy": "ad7c6b55b737b696e5c82c85445040964a03e97f" - }, - "morph": { - "dial_timeout": "30s", - "cache_ttl": "15s", - "switch_interval": "3m", - "rpc_endpoint": [ - { - "address": "wss://rpc1.morph.frostfs.info:40341/ws", - "priority": 0, - "trusted_ca_list": [ - "/path/to/ca.pem" - ], - "certificate": "/path/to/cert", - "key": "/path/to/key" - }, - { - "address": "wss://rpc2.morph.frostfs.info:40341/ws", - "priority": 2 - } - ], - "ape_chain_cache_size": 100000 - }, - "apiclient": { - "dial_timeout": "15s", - "stream_timeout": "20s", - "reconnect_timeout": "30s", - "allow_external": true - }, - "policer": { - "head_timeout": "15s" - }, - "replicator": { - "pool_size": 10, - "put_timeout": "15s" - }, - "container": { - "list_stream": { - "batch_size": "500" - } - }, - "object": { - "delete": { - "tombstone_lifetime": 10 - }, - "put": { - "skip_session_token_issuer_verification": true - }, - "get": { - "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] - } - }, - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ], - "max_ops": 1000 - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - }, - "storage": { - "shard_ro_error_threshold": 100, - "shard": { - "0": { - "mode": "read-only", - "resync_metabase": false, - "resync_metabase_worker_count": 100, - "writecache": { - "enabled": false, - "no_sync": true, - "path": "tmp/0/cache", - "small_object_size": 16384, - "max_object_size": 134217728, - "flush_worker_count": 30, - "capacity": 3221225472, - "page_size": 4096, - "max_object_count": 49, - "max_flushing_objects_size": 100 - }, - "metabase": { - "path": "tmp/0/meta", - "perm": "0644", - "max_batch_size": 100, - "max_batch_delay": 
"10ms" - }, - "compression": { - "enabled": true, - "level": "fastest", - "exclude_content_types": [ - "audio/*", "video/*" - ], - "estimate_compressibility": true, - "estimate_compressibility_threshold": 0.7 - }, - "small_object_size": 102400, - "blobstor": [ - { - "type": "blobovnicza", - "path": "tmp/0/blob/blobovnicza", - "perm": "0644", - "size": 4194304, - "depth": 1, - "width": 4, - "opened_cache_capacity": 50, - "opened_cache_ttl": "1m", - "opened_cache_exp_interval": "30s", - "init_worker_count": 10, - "rebuild_drop_timeout": "30s" - }, - { - "type": "fstree", - "path": "tmp/0/blob", - "perm": "0644", - "depth": 5 - } - ], - "pilorama": { - "path": "tmp/0/blob/pilorama.db", - "max_batch_delay": "10ms", - "max_batch_size": 200 - }, - "gc": { - "remover_batch_size": 150, - "remover_sleep_interval": "2m", - "expired_collector_batch_size": 1500, - "expired_collector_worker_count": 15 - }, - "limits": { - "read": { - "max_running_ops": 10000, - "max_waiting_ops": 1000, - "idle_timeout": "30s", - "tags": [ - { - "tag": "internal", - "weight": 20, - "limit_ops": 0, - "reserved_ops": 1000 - }, - { - "tag": "client", - "weight": 70, - "reserved_ops": 10000 - }, - { - "tag": "background", - "weight": 5, - "limit_ops": 10000, - "reserved_ops": 0 - }, - { - "tag": "writecache", - "weight": 5, - "limit_ops": 25000 - }, - { - "tag": "policer", - "weight": 5, - "limit_ops": 25000, - "prohibited": true - }, - { - "tag": "treesync", - "weight": 5, - "limit_ops": 25 - } - ] - }, - "write": { - "max_running_ops": 1000, - "max_waiting_ops": 100, - "idle_timeout": "45s", - "tags": [ - { - "tag": "internal", - "weight": 200, - "limit_ops": 0, - "reserved_ops": 100 - }, - { - "tag": "client", - "weight": 700, - "reserved_ops": 1000 - }, - { - "tag": "background", - "weight": 50, - "limit_ops": 1000, - "reserved_ops": 0 - }, - { - "tag": "writecache", - "weight": 50, - "limit_ops": 2500 - }, - { - "tag": "policer", - "weight": 50, - "limit_ops": 2500 - }, - { - "tag": "treesync", - "weight": 50, - "limit_ops": 100 - } - ] - } - } - }, - "1": { - "mode": "read-write", - "resync_metabase": true, - "writecache": { - "enabled": true, - "path": "tmp/1/cache", - "memcache_capacity": 2147483648, - "small_object_size": 16384, - "max_object_size": 134217728, - "flush_worker_count": 30, - "capacity": 4294967296 - }, - "metabase": { - "path": "tmp/1/meta", - "perm": "0644", - "max_batch_size": 200, - "max_batch_delay": "20ms" - }, - "compression": { - "enabled": false - }, - "small_object_size": 102400, - "blobstor": [ - { - "type": "blobovnicza", - "path": "tmp/1/blob/blobovnicza", - "perm": "0644", - "size": 4194304, - "depth": 1, - "width": 4, - "opened_cache_capacity": 50, - "opened_cache_ttl": "5m", - "opened_cache_exp_interval": "15s" - }, - { - "type": "fstree", - "path": "tmp/1/blob", - "no_sync": true, - "perm": "0644", - "depth": 5 - } - ], - "pilorama": { - "path": "tmp/1/blob/pilorama.db", - "perm": "0644", - "no_sync": true, - "max_batch_delay": "5ms", - "max_batch_size": 100 - }, - "gc": { - "remover_batch_size": 200, - "remover_sleep_interval": "5m" - } - } - } - }, - "tracing": { - "enabled": true, - "endpoint": "localhost", - "exporter": "otlp_grpc", - "trusted_ca": "", - "attributes":[ - { - "key": "key0", - "value": "value" - }, - { - "key": "key1", - "value": "value" - } - ] - }, - "runtime": { - "soft_memory_limit": 1073741824 - }, - "audit": { - "enabled": true - }, - "multinet": { - "enabled": true, - "subnets": [ - { - "mask": "192.168.219.174/24", - "source_ips": [ - "192.168.218.185", - 
"192.168.219.185" - ] - }, - { - "mask": "10.78.70.74/24", - "source_ips":[ - "10.78.70.185", - "10.78.71.185" - ] - } - ], - "balancer": "roundrobin", - "restrict": false, - "fallback_delay": "350ms" - }, - "qos": { - "critical": { - "authorized_keys": [ - "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", - "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" - ] - }, - "internal": { - "authorized_keys": [ - "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", - "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" - ] - } - } -} diff --git a/config/example/node.yaml b/config/example/node.yaml deleted file mode 100644 index 2d4bc90fb..000000000 --- a/config/example/node.yaml +++ /dev/null @@ -1,350 +0,0 @@ -logger: - level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - destination: journald # logger destination: one of "stdout" (default), "journald" - timestamp: true - tags: - - names: "main, morph" - level: debug - -systemdnotify: - enabled: true - -pprof: - enabled: true - address: localhost:6060 # endpoint for Node profiling - shutdown_timeout: 15s # timeout for profiling HTTP server graceful shutdown - block_rate: 10000 # sampling rate: an average of one blocking event per rate nanoseconds spent blocked is reported; "1" reports every blocking event; "0" disables profiler - mutex_rate: 10000 # sampling rate: on average 1/rate events are reported; "0" disables profiler - -prometheus: - enabled: true - address: localhost:9090 # endpoint for Node metrics - shutdown_timeout: 15s # timeout for metrics HTTP server graceful shutdown - -node: - key: ./wallet.key # path to a binary private key - wallet: - path: "./wallet.json" # path to a NEO wallet; ignored if key is presented - address: "NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz" # address of a NEO account in the wallet; ignored if key is presented - password: "password" # password for a NEO account in the wallet; ignored if key is presented - addresses: # list of addresses announced by Storage node in the Network map - - s01.frostfs.devenv:8080 - - /dns4/s02.frostfs.devenv/tcp/8081 - - grpc://127.0.0.1:8082 - - grpcs://localhost:8083 - attribute_0: "Price:11" - attribute_1: UN-LOCODE:RU MSK - persistent_sessions: - path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) - persistent_state: - path: /state # path to persistent state file of Storage node - "locode_db_path": "/path/to/locode/db" - -grpc: - - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server - tls: - enabled: true # use TLS for a gRPC connection (min version is TLS 1.2) - certificate: /path/to/cert # path to TLS certificate - key: /path/to/key # path to TLS key - - - endpoint: s02.frostfs.devenv:8080 # endpoint for gRPC server - tls: - enabled: false # use TLS for a gRPC connection - - endpoint: s03.frostfs.devenv:8080 - tls: - enabled: true - use_insecure_crypto: true # allow using insecure ciphers with TLS 1.2 - -tree: - enabled: true - cache_size: 15 - replication_worker_count: 32 - replication_channel_capacity: 32 - replication_timeout: 5s - sync_interval: 1h - sync_batch_size: 2000 - authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli - - 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 - - 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56 - -control: - authorized_keys: # list of hex-encoded public keys that have 
rights to use the Control Service - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 - grpc: - endpoint: localhost:8090 # endpoint that is listened by the Control Service - -contracts: # side chain NEOFS contract script hashes; optional, override values retrieved from NNS contract - balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd - container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c - netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca - proxy: ad7c6b55b737b696e5c82c85445040964a03e97f - -morph: - dial_timeout: 30s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). - # Negative value disables caching. A zero value sets the default value. - # Default value: block time. It is recommended to have this value less or equal to block time. - # Cached entities: containers, container lists, eACL tables. - container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache. - switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node - rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success - - address: wss://rpc1.morph.frostfs.info:40341/ws - priority: 0 - trusted_ca_list: - - "/path/to/ca.pem" - certificate: "/path/to/cert" - key: "/path/to/key" - - address: wss://rpc2.morph.frostfs.info:40341/ws - priority: 2 - ape_chain_cache_size: 100000 - netmap: - candidates: - poll_interval: 20s - -apiclient: - dial_timeout: 15s # timeout for FrostFS API client connection - stream_timeout: 20s # timeout for individual operations in a streaming RPC - allow_external: true # allow to fallback to addresses in `ExternalAddr` attribute - reconnect_timeout: 30s # time to wait before reconnecting to a failed node - -policer: - head_timeout: 15s # timeout for the Policer HEAD remote operation - -replicator: - put_timeout: 15s # timeout for the Replicator PUT remote operation - pool_size: 10 # maximum amount of concurrent replications - -container: - list_stream: - batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once - -object: - delete: - tombstone_lifetime: 10 # tombstone "local" lifetime in epochs - put: - skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true - get: - priority: # list of metrics of nodes for prioritization - - $attribute:ClusterName - - $attribute:UN-LOCODE - -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - max_ops: 1000 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 - -storage: - shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) - - shard: - default: # section with the default shard parameters - resync_metabase: true # sync metabase with blobstor on start, expensive, leave false until complete understanding - - writecache: - enabled: true - small_object_size: 16k # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes - max_object_size: 134217728 # size threshold for "big" objects which bypass write-cache and go to the storage directly, bytes - flush_worker_count: 30 # number of write-cache flusher threads - - metabase: - perm: 0o644 # permissions for metabase files(directories: +x for current user and group) - 
max_batch_size: 200 - max_batch_delay: 20ms - - pilorama: - max_batch_delay: 5ms # maximum delay for a batch of operations to be executed - max_batch_size: 100 # maximum amount of operations in a single batch - - compression: - enabled: false # turn on/off zstd compression of stored objects - small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes - - blobstor: - - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) - depth: 1 # max depth of object tree storage in key-value DB - width: 4 # max width of object tree storage in key-value DB - opened_cache_capacity: 50 # maximum number of opened database files - opened_cache_ttl: 5m # ttl for opened database file - opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) - depth: 5 # max depth of object tree storage in FS - - gc: - remover_batch_size: 200 # number of objects to be removed by the garbage collector - remover_sleep_interval: 5m # frequency of the garbage collector invocation - - 0: - mode: read-only # mode of the shard, must be one of the: - # read-write (default) - # read-only - # degraded - # degraded-read-only - # disabled (do not work with the shard, allows to not remove it from the config) - resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding - resync_metabase_worker_count: 100 - - writecache: - enabled: false - no_sync: true - path: tmp/0/cache # write-cache root directory - capacity: 3221225472 # approximate write-cache total size, bytes - max_object_count: 49 - page_size: 4k - max_flushing_objects_size: 100b - - metabase: - path: tmp/0/meta # metabase path - max_batch_size: 100 - max_batch_delay: 10ms - - compression: - enabled: true # turn on/off zstd compression of stored objects - level: fastest - exclude_content_types: - - audio/* - - video/* - estimate_compressibility: true - estimate_compressibility_threshold: 0.7 - - blobstor: - - type: blobovnicza - path: tmp/0/blob/blobovnicza - init_worker_count: 10 #count of workers to initialize blobovniczas - rebuild_drop_timeout: 30s # timeout before drop single blobovnicza - opened_cache_ttl: 1m - opened_cache_exp_interval: 30s - - type: fstree - path: tmp/0/blob # blobstor path - - pilorama: - path: tmp/0/blob/pilorama.db # path to the pilorama database. 
If omitted, `pilorama.db` file is created blobstor.path - max_batch_delay: 10ms - max_batch_size: 200 - - gc: - remover_batch_size: 150 # number of objects to be removed by the garbage collector - remover_sleep_interval: 2m # frequency of the garbage collector invocation - expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector - expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector - - limits: - read: - max_running_ops: 10000 - max_waiting_ops: 1000 - idle_timeout: 30s - tags: - - tag: internal - weight: 20 - limit_ops: 0 - reserved_ops: 1000 - - tag: client - weight: 70 - reserved_ops: 10000 - - tag: background - weight: 5 - limit_ops: 10000 - reserved_ops: 0 - - tag: writecache - weight: 5 - limit_ops: 25000 - - tag: policer - weight: 5 - limit_ops: 25000 - prohibited: true - - tag: treesync - weight: 5 - limit_ops: 25 - write: - max_running_ops: 1000 - max_waiting_ops: 100 - idle_timeout: 45s - tags: - - tag: internal - weight: 200 - limit_ops: 0 - reserved_ops: 100 - - tag: client - weight: 700 - reserved_ops: 1000 - - tag: background - weight: 50 - limit_ops: 1000 - reserved_ops: 0 - - tag: writecache - weight: 50 - limit_ops: 2500 - - tag: policer - weight: 50 - limit_ops: 2500 - - tag: treesync - weight: 50 - limit_ops: 100 - - 1: - writecache: - path: tmp/1/cache # write-cache root directory - capacity: 4 G # approximate write-cache total size, bytes - - metabase: - path: tmp/1/meta # metabase path - - blobstor: - - type: blobovnicza - path: tmp/1/blob/blobovnicza - - type: fstree - path: tmp/1/blob # blobstor path - no_sync: true - - pilorama: - path: tmp/1/blob/pilorama.db - no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. 
- perm: 0o644 # permission to use for the database file and intermediate directories - -tracing: - enabled: true - exporter: "otlp_grpc" - endpoint: "localhost" - trusted_ca: "" - attributes: - - key: key0 - value: value - - key: key1 - value: value - -runtime: - soft_memory_limit: 1gb - -audit: - enabled: true - -multinet: - enabled: true - subnets: - - mask: 192.168.219.174/24 - source_ips: - - 192.168.218.185 - - 192.168.219.185 - - mask: 10.78.70.74/24 - source_ips: - - 10.78.70.185 - - 10.78.71.185 - balancer: roundrobin - restrict: false - fallback_delay: 350ms - -qos: - critical: - authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 - internal: - authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag - - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 - - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json deleted file mode 100644 index b68ce4fa3..000000000 --- a/dev/.vscode-example/launch.json +++ /dev/null @@ -1,269 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "name": "IR", - "type": "go", - "request": "launch", - "mode": "debug", - "program": "cmd/frostfs-ir", - "env": { - "FROSTFS_IR_LOGGER_LEVEL":"info", - "FROSTFS_IR_WALLET_PATH":"${workspaceFolder}/dev/ir/az.json", - "FROSTFS_IR_WALLET_ADDRESS":"Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn", - "FROSTFS_IR_WALLET_PASSWORD":"one", - "FROSTFS_IR_WITHOUT_MAINNET":"true", - "FROSTFS_IR_MORPH_ENDPOINT_CLIENT_0_ADDRESS":"ws://127.0.0.1:30333/ws", - "FROSTFS_IR_MORPH_VALIDATORS":"02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", - "FROSTFS_IR_TIMERS_EMIT":"50", - "FROSTFS_IR_TIMERS_STOP_ESTIMATION_MUL":"1", - "FROSTFS_IR_TIMERS_STOP_ESTIMATION_DIV":"4", - "FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_MUL":"1", - "FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_DIV":"2", - "FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_MUL":"3", - "FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_DIV":"4", - "FROSTFS_IR_EMIT_STORAGE_AMOUNT":"1000000000", - "FROSTFS_IR_NETMAP_CLEANER_ENABLED":"true", - "FROSTFS_IR_NETMAP_CLEANER_THRESHOLD":"3", - "FROSTFS_IR_LOCODE_DB_PATH":"${workspaceFolder}/.cache/locode_db", - "FROSTFS_IR_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8090", - "FROSTFS_IR_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-ir-state" - }, - "postDebugTask": "env-down" - }, - { - "name": "Storage node 1", - "type": "go", - "request": "launch", - "mode": "debug", - "program": "cmd/frostfs-node", - "env": { - "FROSTFS_LOGGER_LEVEL":"debug", - "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", - "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", - "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", - "FROSTFS_NODE_WALLET_PASSWORD":"", - "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", - "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8080", - "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8081", - "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", - "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev", - "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW", - "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s1-state", - "FROSTFS_TREE_ENABLED":"true", - 
"FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s1/wc0", - "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s1/meta0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s1/blobovnicza0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s1/fstree0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama0", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s1/wc1", - "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s1/meta1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s1/blobovnicza1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s1/fstree1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1", - "FROSTFS_PROMETHEUS_ENABLED":"true", - "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", - "FROSTFS_TRACING_ENABLED":"true", - "FROSTFS_TRACING_EXPORTER":"otlp_grpc", - "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", - "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", - "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080" - }, - "postDebugTask": "env-down" - }, - { - "name": "Storage node 2", - "type": "go", - "request": "launch", - "mode": "debug", - "program": "cmd/frostfs-node", - "env": { - "FROSTFS_LOGGER_LEVEL":"debug", - "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", - "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", - "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", - "FROSTFS_NODE_WALLET_PASSWORD":"", - "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", - "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8082", - "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8083", - "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", - "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev", - "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW", - "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s2-state", - "FROSTFS_TREE_ENABLED":"true", - "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s2/wc0", - "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s2/meta0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s2/blobovnicza0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree", - 
"FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s2/fstree0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama0", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s2/wc1", - "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s2/meta1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s2/blobovnicza1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s2/fstree1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1", - "FROSTFS_PROMETHEUS_ENABLED":"true", - "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", - "FROSTFS_TRACING_ENABLED":"true", - "FROSTFS_TRACING_EXPORTER":"otlp_grpc", - "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", - "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", - "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082" - }, - "postDebugTask": "env-down" - }, - { - "name": "Storage node 3", - "type": "go", - "request": "launch", - "mode": "debug", - "program": "cmd/frostfs-node", - "env": { - "FROSTFS_LOGGER_LEVEL":"debug", - "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", - "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", - "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", - "FROSTFS_NODE_WALLET_PASSWORD":"", - "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", - "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8084", - "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8085", - "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", - "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev", - "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW", - "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s3-state", - "FROSTFS_TREE_ENABLED":"true", - "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s3/wc0", - "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s3/meta0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s3/blobovnicza0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s3/fstree0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama0", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s3/wc1", - "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s3/meta1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s3/blobovnicza1", - 
"FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s3/fstree1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1", - "FROSTFS_PROMETHEUS_ENABLED":"true", - "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", - "FROSTFS_TRACING_ENABLED":"true", - "FROSTFS_TRACING_EXPORTER":"otlp_grpc", - "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", - "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", - "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084" - }, - "postDebugTask": "env-down" - }, - { - "name": "Storage node 4", - "type": "go", - "request": "launch", - "mode": "debug", - "program": "cmd/frostfs-node", - "env": { - "FROSTFS_LOGGER_LEVEL":"debug", - "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", - "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", - "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", - "FROSTFS_NODE_WALLET_PASSWORD":"", - "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", - "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8086", - "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8087", - "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", - "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev", - "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW", - "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s4-state", - "FROSTFS_TREE_ENABLED":"true", - "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s4/wc0", - "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s4/meta0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s4/blobovnicza0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s4/fstree0", - "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama0", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true", - "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s4/wc1", - "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s4/meta1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s4/blobovnicza1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s4/fstree1", - "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2", - "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1", - "FROSTFS_PROMETHEUS_ENABLED":"true", - "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", - "FROSTFS_TRACING_ENABLED":"true", - "FROSTFS_TRACING_EXPORTER":"otlp_grpc", - 
"FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", - "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", - "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086" - }, - "postDebugTask": "env-down" - } - ], - "compounds": [ - { - "name": "IR+Storage node", - "configurations": ["IR", "Storage node 1"], - "preLaunchTask": "env-up", - "stopAll": true - }, - { - "name": "IR + 4 storage nodes", - "configurations": ["IR", "Storage node 1", "Storage node 2", "Storage node 3", "Storage node 4"], - "preLaunchTask": "env-up", - "stopAll": true - } - ] -} diff --git a/dev/.vscode-example/tasks.json b/dev/.vscode-example/tasks.json deleted file mode 100644 index 0ccd9d110..000000000 --- a/dev/.vscode-example/tasks.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "version": "2.0.0", - "tasks": [ - { - "type": "shell", - "label": "env-up", - "command": "make env-up", - "group": "build", - "detail": "Up debug environment" - }, - { - "type": "shell", - "label": "env-down", - "command": "make env-down", - "group": "build", - "detail": "Down debug environment" - } - ] -} diff --git a/dev/adm/frostfs-adm.yml b/dev/adm/frostfs-adm.yml deleted file mode 100644 index e75cc27f0..000000000 --- a/dev/adm/frostfs-adm.yml +++ /dev/null @@ -1,19 +0,0 @@ -rpc-endpoint: http://127.0.0.1:30333 -alphabet-wallets: ./dev/ir -network: - max_object_size: 5242880 - epoch_duration: 60 - basic_income_rate: 100000000 - homomorphic_hash_disabled: false - maintenance_mode_allowed: true - max_ec_data_count: 12 - max_ec_parity_count: 4 - fee: - audit: 10000 - candidate: 10000000000 - container: 0 - container_alias: 0 - withdraw: 100000000 -credentials: - az: "one" - contract: "one" diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml deleted file mode 100644 index 40ed35aeb..000000000 --- a/dev/docker-compose.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- - -version: "2.4" -services: - neo-go: - image: nspccdev/neo-go:0.106.0 - container_name: neo-go - command: ["node", "--config-path", "/config", "--privnet", "--debug"] - stop_signal: SIGKILL - ports: - - 30333:30333 - volumes: - - ./neo-go/protocol.privnet.yml:/config/protocol.privnet.yml - - ./neo-go/node-wallet.json:/wallets/node-wallet.json - - ./neo-go/config.yml:/wallets/config.yml - - ./neo-go/wallet.json:/wallets/wallet.json - jaeger: - image: jaegertracing/all-in-one:latest - container_name: jaeger - ports: - - '4317:4317' #OTLP over gRPC - - '4318:4318' #OTLP over HTTP - - '16686:16686' #frontend - stop_signal: SIGKILL - environment: - - COLLECTOR_OTLP_ENABLED=true - - SPAN_STORAGE_TYPE=badger - - BADGER_EPHEMERAL=true diff --git a/dev/empty_pass.yml b/dev/empty_pass.yml deleted file mode 100644 index 1986cf9e4..000000000 --- a/dev/empty_pass.yml +++ /dev/null @@ -1 +0,0 @@ -password: "" diff --git a/dev/ir/az.json b/dev/ir/az.json deleted file mode 100644 index 8e88b432c..000000000 --- a/dev/ir/az.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "version": "3.0", - "name":null, - "accounts": [ - { - "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "single", - "contract": { - "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": false - }, - { - "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "consensus", - "contract": { - "script": 
"EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": false - }, - { - "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "committee", - "contract": { - "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": true - } - ], - "scrypt": { - "n": 16384, - "r": 8, - "p": 8 - }, - "extra": { - "Tokens": null - } -} diff --git a/dev/ir/contract.json b/dev/ir/contract.json deleted file mode 100644 index 310b77bd7..000000000 --- a/dev/ir/contract.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": "3.0", - "accounts": [ - { - "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "", - "contract": { - "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "isDefault": false - } - ], - "scrypt": { - "n": 16384, - "r": 8, - "p": 8 - }, - "extra": { - "Tokens": null - } -} diff --git a/dev/neo-go/config.yml b/dev/neo-go/config.yml deleted file mode 100644 index 7b4bb29d7..000000000 --- a/dev/neo-go/config.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -Path: "/wallets/node-wallet.json" -Password: "one" diff --git a/dev/neo-go/node-wallet.json b/dev/neo-go/node-wallet.json deleted file mode 100644 index 8e88b432c..000000000 --- a/dev/neo-go/node-wallet.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "version": "3.0", - "name":null, - "accounts": [ - { - "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "single", - "contract": { - "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": false - }, - { - "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "consensus", - "contract": { - "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": false - }, - { - "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP", - "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", - "label": "committee", - "contract": { - "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "extra":null, - "isDefault": true - } - ], - "scrypt": { - "n": 16384, - "r": 8, - "p": 8 - }, - "extra": { - "Tokens": null - } -} diff --git a/dev/neo-go/protocol.privnet.yml b/dev/neo-go/protocol.privnet.yml deleted file mode 100644 index 8aaf774a3..000000000 --- a/dev/neo-go/protocol.privnet.yml +++ /dev/null @@ -1,48 +0,0 @@ -ProtocolConfiguration: - Magic: 15405 - MaxTraceableBlocks: 200000 - TimePerBlock: 1s - MemPoolSize: 50000 - StandbyCommittee: - - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 - 
ValidatorsCount: 1 - SeedList: - - 0.0.0.0:20333 - VerifyTransactions: true - StateRootInHeader: true - P2PSigExtensions: true - -ApplicationConfiguration: - SkipBlockVerification: false - DBConfiguration: - Type: "boltdb" - BoltDBOptions: - FilePath: "./db/morph.bolt" - P2P: - Addresses: - - ":20333" - DialTimeout: 3s - ProtoTickInterval: 2s - PingInterval: 30s - PingTimeout: 90s - MaxPeers: 10 - AttemptConnPeers: 5 - MinPeers: 0 - Relay: true - Consensus: - Enabled: true - UnlockWallet: - Path: "./wallets/node-wallet.json" - Password: "one" - RPC: - Addresses: - - "0.0.0.0:30333" - Enabled: true - SessionEnabled: true - EnableCORSWorkaround: false - MaxGasInvoke: 100 - P2PNotary: - Enabled: true - UnlockWallet: - Path: "./wallets/node-wallet.json" - Password: "one" diff --git a/dev/neo-go/wallet.json b/dev/neo-go/wallet.json deleted file mode 100644 index ce68d604c..000000000 --- a/dev/neo-go/wallet.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": "3.0", - "accounts": [ - { - "address": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM", - "key": "6PYP7YrwGnLuu4WYQbEe3WJiC44aKmqwqawLsp7H3oh5vocS9xTv2ZfTp3", - "label": "", - "contract": { - "script": "DCEDGmxvu98CyjUXRfqGubpalFLXhaxPf8K3VIyipGxPz0pBVuezJw==", - "parameters": [ - { - "name": "parameter0", - "type": "Signature" - } - ], - "deployed": false - }, - "lock": false, - "isdefault": false - } - ], - "scrypt": { - "n": 16384, - "r": 8, - "p": 8 - }, - "extra": { - "Tokens": null - } -} diff --git a/dev/storage/wallet01.json b/dev/storage/wallet01.json deleted file mode 100644 index e5b6bb371..000000000 --- a/dev/storage/wallet01.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "version":"3.0", - "accounts":[ - { - "address":"NejLbQpojKJWec4NQRMBhzsrmCyhXfGJJe", - "key":"6PYSS8ccmBcttfcw2YJh8VcNSoeQbQLuJLQ7HoKeYF5roRmGs9LUvmKcWz", - "label":"", - "contract":{ - "script":"DCECK7QEHFDWB/+HHex+TNd3g4jg6mhJ2EzL2aqPMuFqgTFBVuezJw==", - "parameters":[ - { - "name":"parameter0", - "type":"Signature" - } - ], - "deployed":false - }, - "lock":false, - "extra":null, - "isDefault":true - } - ], - "name":null, - "scrypt":{ - "n":16384, - "r":8, - "p":8 - }, - "extra":{ - "Tokens":null - } - } diff --git a/dev/storage/wallet02.json b/dev/storage/wallet02.json deleted file mode 100644 index 9c073deef..000000000 --- a/dev/storage/wallet02.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version":"3.0", - "accounts":[ - { - "address":"NVXXy3hNTvwVEZa2dAibALyJB3Q86aiHvL", - "key":"6PYXd9hxMYfaCkgeZp3q1RoMB921RQFkRxYftcacTJ2S7MUwnivrxi6Yk5", - "label":"", - "contract":{ - "script":"DCED/2W2rnkTSk3OnQ0504Uem6tO6Xq/hugeHFu8UM0oJq5BVuezJw==", - "parameters":[ - { - "name":"parameter0", - "type":"Signature" - } - ], - "deployed":false - }, - "lock":false, - "isDefault":false - } - ], - "scrypt":{ - "n":16384, - "r":8, - "p":8 - }, - "extra":{ - "Tokens":null - } - } diff --git a/dev/storage/wallet03.json b/dev/storage/wallet03.json deleted file mode 100644 index c054a3160..000000000 --- a/dev/storage/wallet03.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version":"3.0", - "accounts":[ - { - "address":"NPTmih9X14Y7xLvmD6RVtDHdH1Y9qJwoTe", - "key":"6PYXNeQzge9fWztVnWYRbr5Mh9q1y4npKVARHYGb484Hct1iNd3vXGR1kk", - "label":"", - "contract":{ - "script":"DCECrJIM198LYbKJBy5rlG4tpOGjG5qxxiG7R14w+kqxAsNBVuezJw==", - "parameters":[ - { - "name":"parameter0", - "type":"Signature" - } - ], - "deployed":false - }, - "lock":false, - "isDefault":false - } - ], - "scrypt":{ - "n":16384, - "r":8, - "p":8 - }, - "extra":{ - "Tokens":null - } - } diff --git a/dev/storage/wallet04.json 
b/dev/storage/wallet04.json
deleted file mode 100644
index cb4676df6..000000000
--- a/dev/storage/wallet04.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-  "version":"3.0",
-  "accounts":[
-    {
-      "address":"Ne2DAQbWvP1s7TbtFc7BStKMnjKJdBaVRm",
-      "key":"6PYWCsGWx8uSVYK94tvK7Ccit8x8Z3f3dHADTFTgLhT9NBXTBqBECL8AyC",
-      "label":"",
-      "contract":{
-        "script":"DCEDjIYpWeVrQ+IPeRh8T+ngvHyMZsFgPmzw7H+Hq2sI3DVBVuezJw==",
-        "parameters":[
-          {
-            "name":"parameter0",
-            "type":"Signature"
-          }
-        ],
-        "deployed":false
-      },
-      "lock":false,
-      "isDefault":false
-    }
-  ],
-  "scrypt":{
-    "n":16384,
-    "r":8,
-    "p":8
-  },
-  "extra":{
-    "Tokens":null
-  }
-}
diff --git a/dev/wallet.json b/dev/wallet.json
deleted file mode 100644
index ce68d604c..000000000
--- a/dev/wallet.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-  "version": "3.0",
-  "accounts": [
-    {
-      "address": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM",
-      "key": "6PYP7YrwGnLuu4WYQbEe3WJiC44aKmqwqawLsp7H3oh5vocS9xTv2ZfTp3",
-      "label": "",
-      "contract": {
-        "script": "DCEDGmxvu98CyjUXRfqGubpalFLXhaxPf8K3VIyipGxPz0pBVuezJw==",
-        "parameters": [
-          {
-            "name": "parameter0",
-            "type": "Signature"
-          }
-        ],
-        "deployed": false
-      },
-      "lock": false,
-      "isdefault": false
-    }
-  ],
-  "scrypt": {
-    "n": 16384,
-    "r": 8,
-    "p": 8
-  },
-  "extra": {
-    "Tokens": null
-  }
-}
diff --git a/docs/authentication.md b/docs/authentication.md
deleted file mode 100644
index 3fe5ca512..000000000
--- a/docs/authentication.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Authentication and signatures
-
-## General overview
-
-![Auth general overview](images/authentication/authoverview.svg)
-
-## Signatures
-
-Every message in the FrostFS network is signed.
-Each signature consists of:
-1. Scheme
-2. Public key
-3. Signature
-
-If the signature check fails, the operation is aborted and an error is returned to the user.
-
-### Schemes
-Currently, three schemes are defined in the [frostfs-api](https://git.frostfs.info/TrueCloudLab/frostfs-api/src/commit/4bae9dd78abcf1a358a65a45fe7303e37fd98099/refs/types.proto#L105):
-
-#### ECDSA
-
-Defined in section 6 of [FIPS 186](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf).
-Implemented in the Go stdlib.
-This is the primary algorithm used to sign and verify requests.
-The hash algorithm used is SHA-512.
-
-#### RFC6979
-
-[RFC 6979](https://www.rfc-editor.org/rfc/rfc6979) defines a deterministic algorithm for ECDSA signatures.
-It is used primarily by neo-go and allows us to perform signature checks inside contracts, such as the container contract.
-The hash algorithm used is SHA-256.
-
-### Public key
-
-The ECDSA public key corresponding to the private key used to sign the message.
-It is the primary user identity and is used to determine the request originator.
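-
-To make the schemes above concrete, here is a minimal, self-contained sketch of signing and verifying a message with ECDSA over a SHA-512 digest, using only the Go stdlib. The curve choice, placeholder payload and ASN.1 signature encoding are assumptions made for illustration; the actual request serialization and signature wire format are defined by the frostfs-api.
-
-```go
-package main
-
-import (
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rand"
-	"crypto/sha512"
-	"fmt"
-)
-
-func main() {
-	// Generate a key pair; in FrostFS the public key also serves as
-	// the primary user identity (see "Public key" above).
-	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
-	if err != nil {
-		panic(err)
-	}
-
-	msg := []byte("serialized request body") // placeholder payload
-
-	// ECDSA scheme: sign the SHA-512 digest of the message.
-	digest := sha512.Sum512(msg)
-	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
-	if err != nil {
-		panic(err)
-	}
-
-	// The verifier recomputes the digest and checks the signature
-	// against the public key attached to the message.
-	fmt.Println("signature valid:", ecdsa.VerifyASN1(&priv.PublicKey, digest[:], sig))
-}
-```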
-
-## Tokens
-
-Generally, the request owner, i.e. the account all access control checks are applied to,
-is taken from the request signature.
-However, session and bearer tokens can alter the authentication process by making the "effective" request owner differ from the actual one.
-The general scheme is given by the following picture:
-
-![Token processing](images/authentication/impersonate.svg)
-
-It is important to note that a token is only valid when the request signature corresponds to the actor the token is issued to.
-
-### Session token
-
-A session token can override the rules for determining the request owner.
-It is defined in the [frostfs-api](https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/session/types.proto#L89).
-If user A signs a session token for user B, then user B can sign a request with their own key, while the node will still process the request as if it originated from user A.
-This is used, for example, when putting objects into the system:
-1. The user creates a session with a node, receiving a session token.
-2. The user signs the session token for a freshly generated key stored on the storage node.
-3. The user sends a raw stream of bytes, while the node signs the created objects with the session key. This way other nodes can validate that the object is owned by the user, even though it is signed by a different key.
-
-A session token may carry some restrictions:
-1. Lifetime, effectively an epoch after which it becomes invalid.
-2. The set of operations it applies to.
-3. The entity it is given to. This is provided in the `session_key` field containing the public key.
-
-### Bearer token
-
-A bearer token is generally used for access control but can also affect authentication if the `allow_impersonate` flag is set. With this flag it behaves similarly to a session token.
-
-## FrostFS ID
-
-## APE
diff --git a/docs/epoch.md b/docs/epoch.md
deleted file mode 100644
index 82fbb9235..000000000
--- a/docs/epoch.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Epoch
-
-The main purpose of the `epoch` in the `frostfs` environment is to manipulate the `netmap`.
-Each new epoch, the `ir` service triggers a revision of the `netmap` contents by adding or removing nodes.
-The `node` service also triggers a few internal processes each new epoch, for example running GC.
-Epochs are also used in the object lifecycle.
-
-At startup, the `ir` service initializes an epoch timer which handles the new epoch tick.
-The epoch timer is a block timer, which means that it ticks every block or set of blocks.
-The epoch duration is stored in the configurable parameter `EpochDuration` in the blockchain.
-It is possible to get it via `frostfs-adm`:
-```shell
-> frostfs-adm morph dump-config -c config.yml -r http://morph-chain.frostfs.devenv:30333
-...
-EpochDuration: 240 (int)
-...
->
-```
-Once the epoch timer ticks, the `ir` service calls the [NewEpoch](https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/commit/a1b61d3949581f4d65b0d32a33d98ba9c193dc2a/netmap/netmap_contract.go#L238) method
-of the `netmap` contract. Each `ir` instance can do this at the same time, but that is not an issue,
-because multiple calls of this method with the same set of parameters give the same result.
-
-The `frostfs-adm` utility has a command to trigger a new epoch:
-```shell
-> frostfs-adm morph force-new-epoch -c config.yml -r http://morph-chain.frostfs.devenv:30333
-```
-The command goes directly to the `netmap` contract and calls the `NewEpoch` method.
-The method checks the alphabet witness and stores candidate nodes which are not in the `OFFLINE` state as the current netmap.
-Then it executes the `NewEpoch` method in the `balance` and `container` contracts.
-At the end it produces a `NewEpoch` notification which is handled by the `node` and `ir` services.
-
-The `ir` handler for `NewEpoch` updates the internal state of the netmap and, if necessary, updates the state of the nodes or
-marks them for exclusion from the netmap in the blockchain.
-
-The `node` handler for `NewEpoch` executes the `addPeer` method of the `netmap` contract.
-This method does nothing by itself, but produces a notification which is handled by the `ir` service.
-The `ir` handler for `AddPeer` may update the node state in the netmap if necessary.
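-
-As noted above, the epoch timer is a block timer. The sketch below illustrates only that counting idea under an assumed callback design; it is not the actual `frostfs-ir` implementation, and the type and field names are invented for illustration.
-
-```go
-package main
-
-import "fmt"
-
-// epochTimer is a hypothetical illustration of a block timer: it counts
-// observed blocks and fires a callback once every epochDuration blocks.
-// Contract invocation, error handling and reconfiguration are omitted.
-type epochTimer struct {
-	epochDuration uint32 // the EpochDuration value stored in the blockchain
-	blocksSeen    uint32
-	onNewEpoch    func() // e.g. a call to the netmap contract's NewEpoch
-}
-
-func (t *epochTimer) handleBlock() {
-	t.blocksSeen++
-	if t.blocksSeen >= t.epochDuration {
-		t.blocksSeen = 0
-		t.onNewEpoch()
-	}
-}
-
-func main() {
-	timer := &epochTimer{
-		epochDuration: 240, // matches the dump-config example above
-		onNewEpoch:    func() { fmt.Println("new epoch tick") },
-	}
-	for i := 0; i < 480; i++ { // simulate 480 blocks: two epoch ticks
-		timer.handleBlock()
-	}
-}
-```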
-
-At startup, a node bootstraps with the `ONLINE` state. From the online state, it is possible to move to `MAINTENANCE` or `OFFLINE`.
-A node is moved to the `OFFLINE` state automatically when there are no bootstrap requests from it for a number of epochs.
-This number is stored in the `ir` config parameter `netmap_cleaner.threshold`.
-From the `OFFLINE` state, a node moves back to `ONLINE` once it bootstraps again.
-The `MAINTENANCE` state persists even if the node is rebooted or unavailable for a few epochs.
-
-## Local netmap view
-
-Application of a storage policy to the netmap produces a subset of nodes
-used to select candidates for object placement and retrieval. To dynamically
-adapt to node info changes, `frostfs-node` merges the info in the netmap with the
-candidates list. This affects the object, control, container and tree services and
-the functionality of the replicator and policer.
-
-To adjust the frequency of merging candidates with the netmap in the local cache, set the duration
-in `netmap.candidates.poll_interval` of the `node` config section.
-
-During the merge, the following node parameters are overridden:
-- Node status;
-- Node endpoints (external and internal values).
-
-The node does not perform the following actions, as they affect object placement:
-- Adding new items to the network map;
-- Overriding node attributes.
diff --git a/docs/evacuation.md b/docs/evacuation.md
deleted file mode 100644
index d47d56d15..000000000
--- a/docs/evacuation.md
+++ /dev/null
@@ -1,115 +0,0 @@
-# Shard data evacuation
-
-## Overview
-
-Evacuation is the process of transferring data from one shard to another. It is used to save data when there are problems with a shard.
-
-To start an evacuation, the shard must be in read-only mode (read more [here](./shard-modes.md)).
-
-Evacuation first transfers data to other shards of the same node; if that is not possible, the data is transferred to other nodes.
-
-Only one running evacuation process is allowed on a node at a time.
-
-It is not necessary to turn on maintenance mode on the storage node.
-
-Once evacuation from a shard has started, it is impossible to read data from it via the public API, unless the evacuation is stopped manually or the node is restarted.
-This is necessary to prevent the policer from removing objects with policy `REP 1 ...` from remote nodes during evacuation.
-
-The `frostfs-cli` utility is used to manage evacuation.
-
-## Commands
-
-`frostfs-cli control shards evacuation start` starts the evacuation process for the specified shards. To start evacuating all node shards, use the `--all` flag.
-By default, both objects and trees are evacuated. To limit the evacuation scope, use the `--scope` flag (possible values are `all`, `trees`, `objects`).
-To evacuate objects only from containers with policy `REP 1`, use the `--rep-one-only` option.
-To adjust the resource consumption required for evacuation, use the options:
- - `--container-worker-count`: the number of concurrent container evacuation workers
- - `--object-worker-count`: the number of concurrent object evacuation workers
-
-`frostfs-cli control shards evacuation stop` stops a running evacuation process.
-
-`frostfs-cli control shards evacuation status` prints the evacuation process status.
-
-`frostfs-cli control shards evacuation reset` resets the evacuation process status.
-
-See the commands' `--help` output for a detailed description.
-
-## Examples
-
-### Set shard mode to read-only
-```bash
-frostfs-cli control shards set-mode --mode read-only --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 8kEBwtvKLU3Hva3PaaodUi
-Enter password >
-Shard mode update request successfully sent.
-
-## Examples
-
-### Set shard mode to read-only
-```bash
-frostfs-cli control shards set-mode --mode read-only --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 8kEBwtvKLU3Hva3PaaodUi
-Enter password >
-Shard mode update request successfully sent.
-```
-
-### Start evacuation and get status
-```bash
-frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 8kEBwtvKLU3Hva3PaaodUi
-Enter password >
-Shard evacuation has been successfully started.
-
-frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:00:03. Estimated time left: 2 minutes.
-
-frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 260 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:01:05. Estimated time left: 1 minutes.
-
-frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: completed. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:02:13.
-```
-
-### Stop running evacuation process
-```bash
-frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 54Y8aot9uc7BSadw2XtYr3
-Enter password >
-Shard evacuation has been successfully started.
-
-frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:03. Estimated time left: 0 minutes.
-
-frostfs-cli control shards evacuation stop --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Evacuation stopped.
-
-frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-Enter password >
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: completed. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Error: context canceled. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:07.
-```
-
-### Start evacuation and await completion
-```bash
-frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 54Y8aot9uc7BSadw2XtYr3 --await
-Enter password >
-Shard evacuation has been successfully started.
-Progress will be reported every 5 seconds.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:04. Estimated time left: 0 minutes.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 343 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:09. Estimated time left: 0 minutes.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 545 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14. Estimated time left: 0 minutes.
-Shard evacuation has been completed.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14.
-```
-
-### Start evacuation and await completion without progress notifications
-```bash
-frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --id 54Y8aot9uc7BSadw2XtYr3 --await --no-progress
-Enter password >
-Shard evacuation has been successfully started.
-Shard evacuation has been completed.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:20:00Z UTC. Duration: 00:00:14.
-```
-
-### Start trees evacuation and await completion
-```bash
-frostfs-cli control shards evacuation start --id FxR6QujButNCHn7jjdhxGP --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --await --scope trees
-Enter password >
-Shard evacuation has been successfully started.
-Progress will be reported every 5 seconds.
-Shard evacuation has been completed.
-Shard IDs: FxR6QujButNCHn7jjdhxGP. Evacuated 0 objects out of 0, failed to evacuate: 0, skipped: 0; evacuated 2 trees out of 2, failed to evacuate: 0. Started at: 2024-02-08T08:44:17Z UTC. Duration: 00:00:00.
-```
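-
-The `reset` subcommand documented above clears the status left by a previous run; a minimal hedged sketch (endpoint and wallet are placeholders, output omitted):
-
-```shell
-# Reset the stored evacuation status on the node
-frostfs-cli control shards evacuation reset \
-    --endpoint s01.frostfs.devenv:8081 \
-    --wallet ./../frostfs-dev-env/services/storage/wallet01.json
-```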
diff --git a/docs/images/authentication/authoverview.puml b/docs/images/authentication/authoverview.puml
deleted file mode 100644
index 20cac9f52..000000000
--- a/docs/images/authentication/authoverview.puml
+++ /dev/null
@@ -1,28 +0,0 @@
-@startuml authoverview
-!include
-!include
-AddElementTag("smart-contract", $bgColor=#0abab5)
-
-Person(user, "User", "User with private key")
-
-Container_Boundary(stor, "FrostFS Storage") {
-  Component(verify, "Sign Service", $descr="Check request signature")
-  Component(apesvc, "APE Service")
-  Component(objsvc, "Object service")
-}
-
-Container_Boundary(neogo, "Blockchain") {
-  Interface "NeoGo"
-  Component(ffsid, "FrostFS ID", $tags="smart-contract", $descr="Stores namespaces and users")
-  Component(policy, "Policy", $tags="smart-contract", $descr="Stores APE rules")
-}
-
-Rel_R(user, verify, "Requests", "gRPC")
-Rel_R(verify, apesvc, "Access control")
-Rel_R(apesvc, objsvc, "Operation")
-Rel_D(apesvc, NeoGo, "Get data to validate request")
-Rel("NeoGo", ffsid, "Fetch users")
-Rel("NeoGo", policy, "Fetch policies")
-
-SHOW_LEGEND(true)
-@enduml
diff --git a/docs/images/authentication/authoverview.svg b/docs/images/authentication/authoverview.svg
deleted file mode 100644
index a34a68da0..000000000
--- a/docs/images/authentication/authoverview.svg
+++ /dev/null
@@ -1 +0,0 @@
-FrostFS Storage[Container]Blockchain[Container]Sign Service Check request signatureAPE ServiceObject serviceNeoGoFrostFS ID Stores namespaces andusersPolicy Stores APE rulesUser User with private keyRequests[gRPC]Access controlOperationGet data to validaterequestFetch usersFetch policiesLegend  person  component  container boundary(dashed)  smart-contract(last text color)  
\ No newline at end of file
diff --git a/docs/images/authentication/impersonate.puml b/docs/images/authentication/impersonate.puml
deleted file mode 100644
index f0a5436f9..000000000
--- a/docs/images/authentication/impersonate.puml
+++ /dev/null
@@ -1,15 +0,0 @@
-@startuml impersonate
-start
-
-if (The request has bearer token with allow_impersonate=true?) then (yes)
-  :Treat bearer token issuer as the request owner.;
-  end
-(no) elseif (The request has session token?) then (yes)
-  :Treat session token issuer as the request owner.;
-  end
-else (no)
-  :Determine request owner from the request signature.;
-  end
-endif
-
-@enduml
diff --git a/docs/images/authentication/impersonate.svg b/docs/images/authentication/impersonate.svg
deleted file mode 100644
index add2c5439..000000000
--- a/docs/images/authentication/impersonate.svg
+++ /dev/null
@@ -1 +0,0 @@
-yesThe request has bearer token with allow_impersonate=true?Treat bearer token issuer as the request owner.yesThe request has session token?nonoTreat session token issuer as the request owner.Determine request owner from the request signature.
\ No newline at end of file
diff --git a/docs/maintenance.md b/docs/maintenance.md
deleted file mode 100644
index 27340c5d7..000000000
--- a/docs/maintenance.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Maintenance mode of storage nodes
-
-## Overview
-
-Storage nodes switch to maintenance mode (MM) when internal procedures are required
-to troubleshoot storage service issues. Data on MM nodes MAY be temporarily
-unavailable depending on the procedures carried out at the site. Otherwise, such
-nodes do not differ from fully functional ones. Nodes independently carry out
-the procedure for turning the MM on and off.
-
-## Reflection in the network map
-
-To globally notify the network about the start of maintenance procedures, the node
-MAY set the corresponding state in the network map. The ability to set the MM state
-is determined by the global network setting. If MM is disallowed at the moment,
-the node will not be able to set this state. The change of the network setting is not
-retroactive: if MM becomes disallowed, the nodes with MM state are not excluded
-from the network map.
-
-To check whether MM switching is currently possible, exec:
-```shell
-$ frostfs-cli netmap netinfo
-...
-Maintenance mode allowed: true
-...
-```
-
-To toggle the setting in the running FrostFS Sidechain, exec:
-```shell
-$ frostfs-adm morph set-config MaintenanceModeAllowed=true|false
-```
-
-To switch the node to MM, exec:
-```shell
-$ frostfs-cli control set-status --status maintenance [--force|-f]
-```
-The `-f` flag forces local maintenance regardless of the network settings.
-
-To stop the maintenance, use the same command but with any other supported state.
-
-Note that the node starts maintenance instantly, while the network map state is changed
-asynchronously: Inner Ring receives switch requests, asserts allowance according
-to the current network settings, and updates the state of the candidate node for the
-next network map. The change will take effect no earlier than the next epoch
-in which a new version of the network map is released.
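-
-Since the state change is asynchronous, one hedged way to confirm it after the next epoch is to inspect the node's own netmap entry; the endpoint and wallet below are placeholders:
-
-```shell
-# Print this node's record from the current network map;
-# its state should show MAINTENANCE after the new epoch.
-frostfs-cli netmap nodeinfo \
-    --rpc-endpoint s01.frostfs.devenv:8080 \
-    --wallet ./wallet.json
-```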
-
-## Object service
-
-Nodes under maintenance MAY fail operations of the FrostFS Object API. A node
-maintained in the current repository always denies all object operations with the
-dedicated status `NODE_UNDER_MAINTENANCE`.
-
-## Data replication
-
-Data persistence after node maintenance is expected but not guaranteed.
-In the basic case, the data replication mechanism would create backup replicas
-of objects that should be stored on the MM node. To reduce network load and
-data operations, replicas on MM nodes are a priori considered correct.
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
deleted file mode 100644
index aa867e83c..000000000
--- a/docs/release-instruction.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Release instructions
-
-## Pre-release checks
-
-These should run successfully:
-
-* `make all`;
-* `make test`;
-* `make lint` (should not change any files);
-* `make fmts` (should not change any files);
-* `go mod tidy` (should not change any files);
-* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env).
-
-## Make release commit
-
-Use the `vX.Y.Z` tag for releases and `vX.Y.Z-rc.N` for release candidates,
-following the [semantic versioning](https://semver.org/) standard.
-
-Determine the revision number for the release:
-
-```shell
-$ export FROSTFS_REVISION=X.Y.Z[-rc.N]
-$ export FROSTFS_TAG_PREFIX=v
-```
-
-Double-check the number:
-
-```shell
-$ echo ${FROSTFS_REVISION}
-```
-
-Create a release branch from the main branch of the origin repository:
-
-```shell
-$ git checkout -b release/${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-```
-
-### Update versions
-
-Write the new revision number into the root `VERSION` file:
-
-```shell
-$ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
-```
-
-Update the supported version of the `TrueCloudLab/frostfs-contract` module in the root
-`README.md` if needed.
-
-### Writing changelog
-
-Add an entry to the `CHANGELOG.md` following the style established there:
-
-* copy the `Unreleased` section (the next steps relate to the section below `Unreleased`)
-* replace the `Unreleased` link with the new revision number
-* update the `Unreleased...new` and `new...old` diff-links at the bottom of the file
-* add an optional codename and the release date in the heading
-* remove all empty sections such as `Added`, `Removed`, etc.
-* make sure all changes have references to relevant issues in `#123` format (if possible)
-* clean up all `Unreleased` sections and leave them empty
-
-### Make release commit
-
-Stage the changed files for commit using `git add`. Commit the changes:
-
-```shell
-$ git commit -s -m 'Release '${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-```
-
-### Open pull request
-
-Push the release branch:
-
-```shell
-$ git push origin release/${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-```
-
-Open a pull request to the main branch of the origin repository so that the
-maintainers check the changes. Remove the release branch after the merge.
-
-## Tag the release
-
-Pull the main branch with the release commit created in the previous step. Tag the commit
-with a PGP signature.
-
-```shell
-$ git checkout master && git pull
-$ git tag -s ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-```
-
-## Push the release tag
-
-```shell
-$ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-```
-
-## Post-release
-
-### Prepare and push images to a Docker registry (automated)
-
-Create Docker images for all applications and push them into the container registry
-(executed automatically in Forgejo Actions upon pushing a release tag):
-
-```shell
-$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
-$ make images
-$ make push-images
-```
-
-### Make a proper release (if not automated)
-
-Edit the automatically-created release on git.frostfs.info, copying entries from `CHANGELOG.md`.
-Build and tar the release binaries with `make prepare-release` and attach them to
-the release. Publish the release.
-
-### Update FrostFS Developer Environment
-
-Prepare a pull request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
-with the new versions.
-
-### Close milestone
-
-Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if it exists.
-
-### Rebuild FrostFS LOCODE database
-
-If the new release contains LOCODE-related changes, rebuild the FrostFS LOCODE database via FrostFS CLI:
-
-```shell
-$ frostfs-cli util locode generate ...
-```
diff --git a/docs/shard-modes.md b/docs/shard-modes.md
deleted file mode 100644
index 6cc4ab13c..000000000
--- a/docs/shard-modes.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# Shard modes description
-
-## List of modes
-
-Each mode is characterized by two important properties:
-1. Whether modifying operations are allowed.
-2. Whether metabase and write-cache are available.
-   The expected deployment scenario is to place both metabase and write-cache on an SSD drive, thus these modes
-   can be approximately described as no-SSD modes.
-
-| Mode | Description |
-|------|-------------|
-| `read-write` | Default mode, all operations are allowed. |
-| `read-only` | Read-only mode, only read operations are allowed, metabase is available. |
-| `degraded` | Degraded mode in which metabase and write-cache are disabled. It shouldn't normally be used at all, because the metabase can contain important indices, such as LOCK object info, and modifying operations in this mode can lead to unexpected behaviour. The purpose of this mode is to allow PUT/DELETE operations without the metabase if really necessary. |
-| `degraded-read-only` | Same as `degraded`, but with only read operations allowed. This mode is used during SSD replacement and/or when the metabase error counter exceeds the threshold. |
-| `disabled` | Currently used only in the config file to temporarily disable a shard. |
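-
-A hedged sketch of switching a single shard between the modes above (the shard ID, endpoint and wallet path are placeholders; `set-mode` is also shown in the evacuation document):
-
-```shell
-# Put one shard into degraded-read-only mode, e.g. before SSD replacement
-frostfs-cli control shards set-mode \
-    --mode degraded-read-only \
-    --id 8kEBwtvKLU3Hva3PaaodUi \
-    --endpoint s01.frostfs.devenv:8081 \
-    --wallet ./wallet.json
-```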
-
-## Shard and Component Status
-
-| Shard Mode | Metabase Mode | Blobstore Mode | Writecache Mode | Pilorama Mode | Blobovnicza Tree Mode | FSTree Mode |
-|------------|---------------|----------------|-----------------|---------------|-----------------------|-------------|
-| `Read-Write` | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE |
-| `Read-Only` | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY |
-| `Degraded-Read-Write` | CLOSED | READ_WRITE | CLOSED | CLOSED | READ_WRITE | READ_WRITE |
-| `Degraded-Read-Only` | CLOSED | READ_ONLY | CLOSED | CLOSED | READ_ONLY | READ_ONLY |
-
-## Transition order
-
-Because each shard consists of multiple components, changing its mode is not an atomic operation.
-Instead, each component changes its mode independently.
-
-For transitions to `read-write` mode the order is:
-1. `metabase`
-2. `blobstor`
-3. `writecache`
-4. `pilorama`
-
-For transitions to all other modes the order is:
-1. `writecache`
-2. `blobstor`
-3. `metabase`
-4. `pilorama`
-
-The motivation is to avoid transient errors, because write-cache can write to both blobstor and metabase.
-Thus, when we want to _stop_ write operations, write-cache needs to change mode before them.
-On the other hand, when we want to _allow_ them, blobstor and metabase should be writable before write-cache is.
-
-If anything goes wrong in the middle, the mode of some components can be different from the actual mode of a shard.
-However, all mode-changing operations are idempotent.
-
-## Automatic mode changes
-
-A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
-
-# Detach shard
-
-To detach a shard, use the `frostfs-cli control shards detach` command. This command removes the shards from the storage
-engine and closes all resources associated with the shards.
-Limitation: a `SIGHUP` or a storage node restart brings a detached shard back online.
diff --git a/docs/sighup.md b/docs/sighup.md
deleted file mode 100644
index 4243d620c..000000000
--- a/docs/sighup.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# SIGHUP behaviour
-
-## Logger
-
-The logger level can be reloaded with a SIGHUP.
-
-## Storage engine
-
-Shards can be added, removed or reloaded with SIGHUP (see the sketch after the list below).
-Each shard from the configuration is matched with existing shards by
-comparing paths from the `shard.blobstor` section. After this we have 3 sets:
-
-1. Shards that are missing from the configuration (or have `mode: disabled`) but are currently open.
-   These are closed.
-2. Shards that are added. These are opened and initialized.
-3. Shards that remain in the configuration.
-   For these shards we apply reload to a `metabase` only. If `resync_metabase` is true, the metabase is also resynchronized.
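-
-A minimal sketch of triggering the reload described above; the process lookup is an assumption about how the node was started:
-
-```shell
-# Send SIGHUP to a running storage node to reload its configuration
-# (assumes the process is named frostfs-node).
-kill -HUP "$(pidof frostfs-node)"
-```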
-
-### Metabase
-
-| Changed section | Actions |
-|-----------------|---------|
-| `path` | If `path` is different, the metabase is closed and reopened with the new path. All other configuration is also updated. |
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
deleted file mode 100644
index da9fdfed0..000000000
--- a/docs/storage-node-configuration.md
+++ /dev/null
@@ -1,602 +0,0 @@
-# FrostFS Storage node configuration file
-
-This section contains a detailed description of the FrostFS Storage node configuration file,
-including default config values and some tips for setting configurable values.
-
-There are some custom types used for brevity:
-1. `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (milliseconds).
-2. `size` -- string consisting of a number and a suffix. Suffix examples include `b` (bytes, default), `k` (kibibytes), `m` (mebibytes), `g` (gibibytes).
-3. `file mode` -- octal number. Usually, it starts with `0` and contains 3 digits, corresponding to file access permissions for user, group and others.
-4. `public key` -- hex-encoded public key.
-5. `hash160` -- hex-encoded 20-byte hash of a deployed contract.
-
-# Structure
-
-| Section | Description |
-|--------------|---------------------------------------------------------|
-| `node` | [Node parameters](#node-section) |
-| `logger` | [Logging parameters](#logger-section) |
-| `pprof` | [PProf configuration](#pprof-section) |
-| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
-| `control` | [Control service configuration](#control-section) |
-| `contracts` | [Override FrostFS contracts hashes](#contracts-section) |
-| `morph` | [N3 blockchain client configuration](#morph-section) |
-| `apiclient` | [FrostFS API client configuration](#apiclient-section) |
-| `policer` | [Policer service configuration](#policer-section) |
-| `replicator` | [Replicator service configuration](#replicator-section) |
-| `storage` | [Storage engine configuration](#storage-section) |
-| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-| `multinet` | [Multinet configuration](#multinet-section) |
-| `qos` | [QoS configuration](#qos-section) |
-
-# `control` section
-```yaml
-control:
-  authorized_keys:
-    - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
-    - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
-  grpc:
-    endpoint: 127.0.0.1:8090
-```
-| Parameter | Type | Default value | Description |
-|-------------------|----------------|---------------|-----------------------------------------------------------------------------------|
-| `authorized_keys` | `[]public key` | empty | List of public keys which are used to authorize requests to the control service. |
-| `grpc.endpoint` | `string` | empty | Address that the control service listener binds to. |
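-
-As a hedged illustration of the `authorized_keys` mechanism, a health check against the control endpoint configured above; the wallet must hold a key from that list, and both paths are placeholders:
-
-```shell
-# Query the control service; the request is signed by the wallet key,
-# which must be present in control.authorized_keys.
-frostfs-cli control healthcheck \
-    --endpoint 127.0.0.1:8090 \
-    --wallet ./wallet.json
-```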
-
-# `grpc` section
-```yaml
-grpc:
-  - endpoint: localhost:8080
-    tls:
-      enabled: true
-      certificate: /path/to/cert.pem
-      key: /path/to/key.pem
-  - endpoint: internal.ip:8080
-  - endpoint: external.ip:8080
-    tls:
-      enabled: true
-      use_insecure_crypto: true
-```
-Contains an array of gRPC endpoint configurations. The following table describes the format of each
-element.
-
-| Parameter | Type | Default value | Description |
-|------------|-------------------------------|---------------|------------------------------------------|
-| `endpoint` | `string` | empty | Address that the service listener binds to. |
-| `tls` | [TLS config](#tls-subsection) | | TLS configuration for the endpoint. |
-
-## `tls` subsection
-
-| Parameter | Type | Default value | Description |
-|-----------------------|----------|---------------|----------------------------------------------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable TLS for the endpoint. |
-| `certificate` | `string` | | Path to the TLS certificate. |
-| `key` | `string` | | Path to the key. |
-| `use_insecure_crypto` | `bool` | `false` | If true, ciphers considered insecure by the Go stdlib are allowed to be used. |
-
-# `pprof` section
-
-Contains configuration for the `pprof` profiler.
-
-| Parameter | Type | Default value | Description |
-|--------------------|-----------------------------------|---------------|---------------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable the service. |
-| `address` | `string` | | Address that the service listener binds to. |
-| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. |
-| `debug` | [Debug config](#debug-subsection) | | Optional profiles configuration. |
-
-## `debug` subsection
-
-Contains optional profiles configuration.
-
-| Parameter | Type | Default value | Description |
-|--------------|-------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `block_rate` | `int` | `0` | Controls the block profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetBlockProfileRate. |
-| `mutex_rate` | `int` | `0` | Controls the mutex profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetMutexProfileFraction. |
-
-# `prometheus` section
-
-Contains configuration for the `prometheus` metrics service.
-
-| Parameter | Type | Default value | Description |
-|--------------------|------------|---------------|---------------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable the service. |
-| `address` | `string` | | Address that the service listener binds to. |
-| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. |
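-
-A hedged way to check that the metrics service responds once enabled; the address below is a placeholder and should match whatever `prometheus.address` is set to:
-
-```shell
-# Fetch the Prometheus exposition endpoint of the node
-curl http://127.0.0.1:9090/metrics
-```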
-
-# `logger` section
-Contains logger parameters.
-
-```yaml
-logger:
-  level: info
-  tags:
-    - names: "main, morph"
-      level: debug
-```
-
-| Parameter | Type | Default value | Description |
-|-----------|-----------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
-| `level` | `string` | `info` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
-| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. |
-
-## `tags` subsection
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `names` | `string` | | List of components divided by `,`.<br/>Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
-| `level` | `string` | | Logging level for the components from `names`, overrides the default logging level. |
-
-# `contracts` section
-Contains override values for FrostFS side-chain contract hashes. Most of the time contract
-hashes are fetched from the NNS contract, so this section can be omitted.
-
-```yaml
-contracts:
-  balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd
-  container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c
-  netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
-  proxy: ad7c6b55b737b696e5c82c85445040964a03e97f
-```
-
-| Parameter | Type | Default value | Description |
-|-------------|-----------|---------------|--------------------------|
-| `balance` | `hash160` | | Balance contract hash. |
-| `container` | `hash160` | | Container contract hash. |
-| `netmap` | `hash160` | | Netmap contract hash. |
-| `proxy` | `hash160` | | Proxy contract hash. |
-
-# `morph` section
-
-```yaml
-morph:
-  dial_timeout: 30s
-  cache_ttl: 15s
-  ape_chain_cache_size: 10000
-  rpc_endpoint:
-    - address: wss://rpc1.morph.frostfs.info:40341/ws
-      priority: 1
-    - address: wss://rpc2.morph.frostfs.info:40341/ws
-      priority: 2
-  switch_interval: 2m
-  netmap:
-    candidates:
-      poll_interval: 20s
-```
-
-| Parameter | Type | Default value | Description |
-|-----------------------------------|-----------------------------------------------------------|------------------|------------------------------------------------------------------------|
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest-priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
-| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval between merges of netmap candidates with the netmap in the local cache. |
-
-## `rpc_endpoint` subsection
-| Parameter | Type | Default value | Description |
-|------------|----------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `address` | `string` | | _WebSocket_ N3 endpoint. |
-| `priority` | `int` | `1` | Priority of an endpoint. An endpoint with a higher priority (lower configuration value) has more chance of being used. Endpoints with equal priority are iterated over randomly; a negative priority is interpreted as `1`. |
-
-# `storage` section
-
-Local storage engine configuration.
-
-| Parameter | Type | Default value | Description |
-|----------------------------|-----------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------|
-| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before a shard automatically moves to `Degraded` or `ReadOnly` mode. |
-| `low_mem` | `bool` | `false` | Reduce memory consumption at the cost of performance. |
-| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
-
-## `shard` subsection
-
-Contains configuration for each shard. Keys must be consecutive numbers starting from zero.
-The `default` subsection has the same format and specifies defaults for missing values.
-The following table describes the configuration for each shard.
-
-| Parameter | Type | Default value | Description |
-|--------------------------------|-----------------------------------------------|---------------|--------------------------------------------------------------------------------------------------------------|
-| `compression` | [Compression config](#compression-subsection) | | Compression config. |
-| `mode` | `string` | `read-write` | Shard mode.<br/>Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled`. |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync the metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in the blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
-| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
-
-### `compression` subsection
-
-Contains compression config.
-
-```yaml
-compression:
-  enabled: true
-  level: smallest_size
-  exclude_content_types:
-    - audio/*
-    - video/*
-  estimate_compressibility: true
-  estimate_compressibility_threshold: 0.7
-```
-
-| Parameter | Type | Default value | Description |
-|---------------------------------------|------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `enabled` | `bool` | `false` | Flag to enable compression. |
-| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
-| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. The content-type is taken from the `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `estimate_compressibility` | `bool` | `false` | If `true`, then a normalized compressibility estimation is used to decide whether to compress data. |
-| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. |
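-
-The `exclude_content_types` filter matches the object's `Content-Type` attribute, which clients set at upload time; a hedged sketch of such an upload (the container ID, paths and endpoint are placeholders):
-
-```shell
-# Store an object with a Content-Type attribute so that, with the
-# config above, it is skipped by compression.
-frostfs-cli object put \
-    --rpc-endpoint s01.frostfs.devenv:8080 \
-    --wallet ./wallet.json \
-    --cid <container-id> \
-    --file ./video.mp4 \
-    --attributes Content-Type=video/mp4
-```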
-
-### `blobstor` subsection
-
-Contains a list of substorages, each with its own type.
-Currently only 2 types are supported: `fstree` and `blobovnicza`.
-
-```yaml
-blobstor:
-  - type: blobovnicza
-    path: /path/to/blobstor
-    depth: 1
-    width: 4
-  - type: fstree
-    path: /path/to/blobstor/blobovnicza
-    perm: 0o644
-    size: 4194304
-    depth: 1
-    width: 4
-    opened_cache_capacity: 50
-    opened_cache_ttl: 5m
-    opened_cache_exp_interval: 15s
-```
-
-#### Common options for sub-storages
-| Parameter | Type | Default value | Description |
-|-----------|-----------|---------------|--------------------------------------------------------|
-| `path` | `string` | | Path to the root of the blobstor. |
-| `perm` | file mode | `0660` | Default permission for created files and directories. |
-
-#### `fstree` type options
-| Parameter | Type | Default value | Description |
-|-----------|-----------|---------------|--------------------------------------------------------|
-| `path` | `string` | | Path to the root of the blobstor. |
-| `perm` | file mode | `0660` | Default permission for created files and directories. |
-| `depth` | `int` | `4` | File-system tree depth. |
-
-#### `blobovnicza` type options
-| Parameter | Type | Default value | Description |
-|-----------------------------|------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the root of the blobstor. |
-| `perm` | file mode | `0660` | Default permission for created files and directories. |
-| `size` | `size` | `1 G` | Maximum size of a single blobovnicza. |
-| `depth` | `int` | `2` | Blobovnicza tree depth. |
-| `width` | `int` | `16` | Blobovnicza tree width. |
-| `opened_cache_capacity` | `int` | `16` | Maximum number of simultaneously opened blobovniczas. |
-| `opened_cache_ttl` | `duration` | `0` | TTL in cache for opened blobovniczas (disabled by default). In case of heavy random reads with 10 shards, each with 10_000 databases, and 400 objects accessed per second, we access each db approximately once per (10 * 10_000 / 400) = 250 seconds <= 300 seconds = 5 min. Also keep in mind that in this scenario they will probably be closed earlier because of the cache capacity, so bigger values are likely to be of no use. |
-| `opened_cache_exp_interval` | `duration` | `15s` | Cache cleanup interval for expired blobovniczas. |
-| `init_worker_count` | `int` | `5` | Maximum number of concurrent initialization workers. |
-| `rebuild_drop_timeout` | `duration` | `10s` | Timeout before dropping an empty blobovnicza file during rebuild. |
-
-### `gc` subsection
-
-Contains garbage-collection service configuration. It iterates over the blobstor and removes objects the node no longer needs.
-
-```yaml
-gc:
-  remover_batch_size: 200
-  remover_sleep_interval: 5m
-  expired_collector_batch_size: 500
-  expired_collector_worker_count: 5
-```
-
-| Parameter | Type | Default value | Description |
-|----------------------------------|------------|---------------|----------------------------------------------------------|
-| `remover_batch_size` | `int` | `100` | Amount of objects to grab in a single batch. |
-| `remover_sleep_interval` | `duration` | `1m` | Time to sleep between iterations. |
-| `expired_collector_batch_size` | `int` | `500` | Max amount of expired objects to grab in a single batch. |
-| `expired_collector_worker_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
-
-### `metabase` subsection
-
-```yaml
-metabase:
-  path: /path/to/meta.db
-  perm: 0o644
-  max_batch_size: 200
-  max_batch_delay: 20ms
-```
-
-| Parameter | Type | Default value | Description |
-|-------------------|------------|---------------|--------------------------------------------------------------------------|
-| `path` | `string` | | Path to the metabase file. |
-| `perm` | file mode | `0660` | Permissions to set for the database file. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of write operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-
-### `writecache` subsection
-
-```yaml
-writecache:
-  enabled: true
-  path: /path/to/writecache
-  capacity: 4294967296
-  max_object_size: 134217728
-  flush_worker_count: 30
-```
-
-| Parameter | Type | Default value | Description |
-|-----------------------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the write-cache directory. |
-| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_count` | `int` | unrestricted | Approximate maximum object count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
-
-### `limits` subsection
-
-```yaml
-limits:
-  max_read_running_ops: 10000
-  max_read_waiting_ops: 1000
-  max_write_running_ops: 1000
-  max_write_waiting_ops: 100
-  read:
-    - tag: internal
-      weight: 20
-      limit_ops: 0
-      reserved_ops: 1000
-    - tag: client
-      weight: 70
-      reserved_ops: 10000
-    - tag: background
-      weight: 5
-      limit_ops: 10000
-      reserved_ops: 0
-    - tag: writecache
-      weight: 5
-      limit_ops: 25000
-    - tag: policer
-      weight: 5
-      limit_ops: 25000
-  write:
-    - tag: internal
-      weight: 200
-      limit_ops: 0
-      reserved_ops: 100
-    - tag: client
-      weight: 700
-      reserved_ops: 1000
-    - tag: background
-      weight: 50
-      limit_ops: 1000
-      reserved_ops: 0
-    - tag: writecache
-      weight: 50
-      limit_ops: 2500
-    - tag: policer
-      weight: 50
-      limit_ops: 2500
-```
-
-| Parameter | Type | Default value | Description |
-|-------------------------|----------|----------------|--------------------------------------------------------------------------------------------------------------------|
-| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
-| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
-| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
-| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
-| `read` | `[]tag` | empty | Array of shard read settings for tags. |
-| `write` | `[]tag` | empty | Array of shard write settings for tags. |
-| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
-| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any of them. |
-| `tag.limit_ops` | `float` | 0 (no limit) | Operations-per-second rate limit for queries with the specified tag. |
-| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations-per-second rate for queries with the specified tag. |
-| `tag.prohibited` | `bool` | false | If true, operations with the specified tag will be prohibited. |
-
-# `node` section
-
-```yaml
-node:
-  wallet:
-    path: /path/to/wallet.json
-    address: NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz
-    password: password
-  addresses:
-    - grpc://external.ip:8082
-  attribute:
-    - "Price:11"
-    - "UN-LOCODE:RU MSK"
-    - "key:value"
-  persistent_sessions:
-    path: /sessions
-  persistent_state:
-    path: /state
-  locode_db_path: "/path/to/locode/db"
```
-
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|------------------------------------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `<key>:<value>` format. |
-| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
-| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
-| `locode_db_path` | `string` | empty | Path to the UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. |
-
-## `wallet` subsection
-N3 wallet configuration.
-
-| Parameter | Type | Default value | Description |
-|------------|----------|---------------|------------------------------|
-| `path` | `string` | | Path to the wallet file. |
-| `address` | `string` | | Wallet address to use. |
-| `password` | `string` | | Password to open the wallet. |
-
-## `persistent_sessions` subsection
-
-Contains persistent session token store configuration. By default, sessions do not persist between restarts.
-
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|-----------------------|
-| `path` | `string` | | Path to the database. |
-
-## `persistent_state` subsection
-Configures persistent storage for auxiliary information, such as the last seen block height.
-It is used to correctly handle node restarts or crashes.
-
-| Parameter | Type | Default value | Description |
-|-----------|----------|--------------------------|-----------------------|
-| `path` | `string` | `.frostfs-storage-state` | Path to the database. |
-
-# `apiclient` section
-Configuration for the FrostFS API client used for communication with other FrostFS nodes.
-
-```yaml
-apiclient:
-  dial_timeout: 15s
-  stream_timeout: 20s
-  reconnect_timeout: 30s
-```
-| Parameter | Type | Default value | Description |
-|-------------------|----------|---------------|-------------------------------------------------------------------------|
-| dial_timeout | duration | `5s` | Timeout for dialing connections to other storage or inner ring nodes. |
-| stream_timeout | duration | `15s` | Timeout for individual operations in a streaming RPC. |
-| reconnect_timeout | duration | `30s` | Time to wait before reconnecting to a failed node. |
-
-# `policer` section
-
-Configuration for the Policer service. It ensures that objects are stored according to the intended policy.
-
-```yaml
-policer:
-  head_timeout: 15s
-```
-
-| Parameter | Type | Default value | Description |
-|----------------|------------|---------------|-----------------------------------------------|
-| `head_timeout` | `duration` | `5s` | Timeout for performing the `HEAD` operation. |
-
-# `replicator` section
-
-Configuration for the Replicator service.
-
-```yaml
-replicator:
-  put_timeout: 15s
-  pool_size: 10
-```
-
-| Parameter | Type | Default value | Description |
-|---------------|------------|---------------|----------------------------------------------|
-| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
-
-# `object` section
-Contains object-service related parameters.
-
-```yaml
-object:
-  get:
-    priority:
-      - $attribute:ClusterName
```
-
-| Parameter | Type | Default value | Description |
-|-----------------------------|------------|---------------|-------------------------------------------------------------------------------------------------|
-| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `get.priority` | `[]string` | | List of node metrics for prioritization. Used for computing the response to GET requests. |
-
-# `rpc` section
-Contains limits on the number of active RPCs for the specified method(s).
-
-```yaml
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-      max_ops: 1000
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000
-```
-
-| Parameter | Type | Default value | Description |
-|------------------|------------|---------------|----------------------------------------------------------------|
-| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). |
-| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. |
-
-# `runtime` section
-Contains runtime parameters.
-
-```yaml
-runtime:
-  soft_memory_limit: 1GB
-```
-
-| Parameter | Type | Default value | Description |
-|---------------------|--------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `soft_memory_limit` | `size` | 0 | Soft memory limit for the runtime. Zero or no value stands for no limit. If the `GOMEMLIMIT` environment variable is set, the value from the configuration file is ignored. |
-
-# `audit` section
-Contains audit parameters.
-
-```yaml
-audit:
-  enabled: true
-```
-
-| Parameter | Type | Default value | Description |
-|-----------|--------|---------------|----------------------------------------------------|
-| `enabled` | `bool` | false | If `true`, then audit event logs will be recorded. |
-
-# `multinet` section
-Contains multinet parameters.
-
-```yaml
-multinet:
-  enabled: true
-  subnets:
-    - mask: 192.168.219.174/24
-      source_ips:
-        - 192.168.218.185
-        - 192.168.219.185
-    - mask: 10.78.70.74/24
-      source_ips:
-        - 10.78.70.185
-        - 10.78.71.185
-  balancer: roundrobin
-  restrict: false
-  fallback_delay: 350ms
-```
-
-| Parameter | Type | Default value | Description |
-|------------------|------------|---------------|------------------------------------------------------------------------------------------------------------------------------|
-| `enabled` | `bool` | false | If `true`, then source-based routing is enabled. |
-| `subnets` | `subnet` | empty | Resulting subnets. |
-| `balancer` | `string` | "" | Balancer to select network interfaces; allowed values are "" (no balancing, use the first suitable interface) or "roundrobin". |
-| `restrict` | `bool` | false | If `true`, then any requests that do not match `subnets` will fail. |
-| `fallback_delay` | `duration` | 350ms | Delay before falling back to secondary IP addresses when resolving a hostname. |
-
-# `qos` section
-```yaml
-qos:
-  critical:
-    authorized_keys:
-      - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
-      - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
-  internal:
-    authorized_keys:
-      - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
-      - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
-```
-| Parameter | Type | Default value | Description |
-|----------------------------|----------------|---------------|-------------------------------------------------------------------------------|
-| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
-| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |
diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md
deleted file mode 100644
index 195e0c6b3..000000000
--- a/docs/update-go-instruction.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Updating Golang version
-
-## Update go.mod
-
-`go mod edit -go=X.Y`
-
-## Update CI
-
-Change the Golang versions for unit tests in CI.
-There is a `go` section in the `.forgejo/workflows/*.yml` files:
-```yaml
-jobs:
-  test:
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        go: [ 'X.Y.x', 'X.Y.x', 'X.Y.x' ]
-```
-
-That section should contain the three latest Golang minor versions.
-
-## Update docker images
-
-Update all docker files that contain the `golang` image in the `./docker`
-directory.
-
-## Update Makefile
-
-Update the `GO_VERSION` variable in `./Makefile`.
-
-## Apply language changes
-
-Open a PR that fixes/updates the repository's code according to
-language improvements.
diff --git a/go.mod b/go.mod deleted file mode 100644 index 6f1950936..000000000 --- a/go.mod +++ /dev/null @@ -1,137 +0,0 @@ -module git.frostfs.info/TrueCloudLab/frostfs-node - -go 1.23.0 - -require ( - code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 - git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa - git.frostfs.info/TrueCloudLab/hrw v1.2.1 - git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 - git.frostfs.info/TrueCloudLab/tzhash v1.8.0 - git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 - github.com/VictoriaMetrics/easyproto v0.1.4 - github.com/cheggaaa/pb v1.0.29 - github.com/chzyer/readline v1.5.1 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/felixge/fgprof v0.9.5 - github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 - github.com/gdamore/tcell/v2 v2.7.4 - github.com/go-pkgz/expirable-cache/v3 v3.0.0 - github.com/google/uuid v1.6.0 - github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.4 - github.com/mailru/easyjson v0.7.7 - github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.15.0 - github.com/nspcc-dev/neo-go v0.106.3 - github.com/olekukonko/tablewriter v0.0.5 - github.com/panjf2000/ants/v2 v2.9.0 - github.com/prometheus/client_golang v1.19.0 - github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 - github.com/spf13/cast v1.6.0 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.19.0 - github.com/ssgreg/journald v1.0.0 - github.com/stretchr/testify v1.9.0 - go.etcd.io/bbolt v1.3.10 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 - go.uber.org/zap v1.27.0 - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 - golang.org/x/term v0.30.0 - google.golang.org/grpc v1.69.2 - google.golang.org/protobuf v1.36.1 - gopkg.in/yaml.v3 v3.0.1 -) - -require ( - github.com/sagikazarmark/locafero v0.6.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect -) - -require ( - git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect - github.com/antlr4-go/antlr/v4 v4.13.1 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.13.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/davidmz/go-pageant v1.0.2 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gdamore/encoding v1.0.0 // indirect - github.com/go-fed/httpsig v1.1.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - 
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/holiman/uint256 v1.2.4 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/klauspost/reedsolomon v1.12.1 // indirect - github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect - github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect - github.com/nspcc-dev/rfc6979 v0.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/twmb/murmur3 v1.1.8 // indirect - github.com/urfave/cli v1.22.14 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.31.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.4.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect -) - -replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 diff --git a/go.sum b/go.sum deleted file mode 100644 index 5b075f60a..000000000 --- a/go.sum +++ /dev/null @@ -1,451 +0,0 @@ -code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= -code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 
h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= -git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= -git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= -git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= -git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= -git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= -git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= -git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= -git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= -git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= -git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8= -git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4= -git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc= -github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710= -github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= -github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= -github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= -github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= -github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= -github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= -github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= -github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= -github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw= -github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= -github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.14.1 
h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= -github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= -github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= -github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI= -github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= -github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= -github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU= -github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= -github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= -github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw= -github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= -github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= -github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= 
-github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= -github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk= -github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= -github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM= -github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/onsi/gomega v1.19.0/go.mod 
h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= -github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo= -github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY= -github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= -github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU= -github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= -github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= -github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod 
h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
-rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
-rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/help.mk b/help.mk
deleted file mode 100644
index a2ac989dc..000000000
--- a/help.mk
+++ /dev/null
@@ -1,11 +0,0 @@
-.PHONY: help
-
-# Show this help prompt
-help:
-	@echo ' Usage:'
-	@echo ''
-	@echo ' make <target>'
-	@echo ''
-	@echo ' Targets:'
-	@echo ''
-	@awk '/^#/{ comment = substr($$0,3) } /^[a-zA-Z][a-zA-Z0-9_-]+:/{ print " ", $$1, comment; comment = "" }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
diff --git a/internal/ape/util.go b/internal/ape/util.go
deleted file mode 100644
index 99eba95ba..000000000
--- a/internal/ape/util.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package ape
-
-import "regexp"
-
-var (
-	SubjectNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,64}$`)
-	GroupNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,128}$`)
-
-	// NamespaceNameRegexp similar to https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/commit/f2a82aa635aa57d9b05092d8cf15b170b53cc324/nns/nns_contract.go#L690
-	NamespaceNameRegexp = regexp.MustCompile(`(^$)|(^[a-z0-9]{1,2}$)|(^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$)`)
-)
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
deleted file mode 100644
index 113d2eba9..000000000
--- a/internal/assert/cond.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package assert
-
-import (
-	"fmt"
-	"strings"
-)
-
-func True(cond bool, details ...string) {
-	if !cond {
-		panic(strings.Join(details, " "))
-	}
-}
-
-func False(cond bool, details ...string) {
-	if cond {
-		panic(strings.Join(details, " "))
-	}
-}
-
-func NoError(err error, details ...string) {
-	if err != nil {
-		content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
-		panic(content)
-	}
-}
-
-func Fail(details ...string) {
-	panic(strings.Join(details, " "))
-}
diff --git a/internal/audit/consts.go b/internal/audit/consts.go
deleted file mode 100644
index f4fa19ab9..000000000
--- a/internal/audit/consts.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package audit
-
-const (
-	InvalidValue = "invalid_value"
-	NotDefined = "not_defined"
-	Empty = "empty"
-)
diff --git a/internal/audit/request.go b/internal/audit/request.go
deleted file mode 100644
index 17666ab4b..000000000
--- a/internal/audit/request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package audit
-
-import (
-	"context"
-
-	crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"go.uber.org/zap"
-)
-
-type Request interface {
-	GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type Target interface {
-	String() string
-}
-
-func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
-	var key []byte
-	if req != nil {
-		key = req.GetVerificationHeader().GetBodySignature().GetKey()
-	}
-	LogRequestWithKey(ctx, log, operation, key, target, status)
-}
-
-func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
-	object, subject := NotDefined, NotDefined
-
-	publicKey := crypto.UnmarshalPublicKey(key)
-	if publicKey != nil {
-		subject = ((*keys.PublicKey)(publicKey)).StringCompressed()
-	}
-
-	if target != nil {
-		object = target.String()
-	}
-
-	log.Info(ctx, logs.AuditEventLogRecord,
logs.AuditEventLogRecord, - zap.String("operation", operation), - zap.String("object", object), - zap.String("subject", subject), - zap.Bool("success", status)) -} diff --git a/internal/audit/target.go b/internal/audit/target.go deleted file mode 100644 index 2d6881e29..000000000 --- a/internal/audit/target.go +++ /dev/null @@ -1,102 +0,0 @@ -package audit - -import ( - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type ModelType[T any] interface { - ReadFromV2(m T) error - String() string -} - -func TargetFromRef[T any](ref *T, model ModelType[T]) Target { - if ref == nil { - return stringTarget{s: NotDefined} - } - if err := model.ReadFromV2(*ref); err != nil { - return stringTarget{s: InvalidValue} - } - return stringTarget{s: model.String()} -} - -func TargetFromRefs[T any](refs []*T, model ModelType[T]) Target { - if len(refs) == 0 { - return stringTarget{s: NotDefined} - } - sb := &strings.Builder{} - for idx, ref := range refs { - if idx > 0 { - sb.WriteString(";") - } - if ref == nil { - sb.WriteString(Empty) - continue - } - if err := model.ReadFromV2(*ref); err != nil { - sb.WriteString(InvalidValue) - } else { - sb.WriteString(model.String()) - } - } - return sb -} - -type stringTarget struct { - s string -} - -func (t stringTarget) String() string { - return t.s -} - -func TargetFromString(s string) Target { - if len(s) == 0 { - s = Empty - } - return stringTarget{s: s} -} - -func TargetFromChainID(chainTargetType, chainTargetName string, chainID []byte) Target { - if len(chainTargetType) == 0 && len(chainTargetName) == 0 && len(chainID) == 0 { - return stringTarget{s: NotDefined} - } - t, n, c := Empty, Empty, Empty - if len(chainTargetType) > 0 { - t = chainTargetType - } - if len(chainTargetName) > 0 { - n = chainTargetName - } - if len(chainID) > 0 { - c = string(chainID) - } - return stringTarget{s: t + ":" + n + ":" + c} -} - -func TargetFromContainerIDObjectID(containerID *refs.ContainerID, objectID *refs.ObjectID) Target { - if containerID == nil && objectID == nil { - return stringTarget{s: NotDefined} - } - c, o := Empty, Empty - if containerID != nil { - var cnr cid.ID - if err := cnr.ReadFromV2(*containerID); err != nil { - c = InvalidValue - } else { - c = cnr.EncodeToString() - } - } - if objectID != nil { - var obj oid.ID - if err := obj.ReadFromV2(*objectID); err != nil { - o = InvalidValue - } else { - o = obj.EncodeToString() - } - } - return stringTarget{s: c + "/" + o} -} diff --git a/internal/logs/logs.go b/internal/logs/logs.go deleted file mode 100644 index 626372f43..000000000 --- a/internal/logs/logs.go +++ /dev/null @@ -1,521 +0,0 @@ -package logs - -// Common service logs. -const ( - ServingRequest = "serving request..." - OperationFinishedSuccessfully = "operation finished successfully" - OperationFinishedWithError = "operation finished with error" - - TryingToExecuteInContainer = "trying to execute in container..." - CouldNotGetCurrentEpochNumber = "could not get current epoch number" - ProcessEpoch = "process epoch" - ProcessingNode = "processing node..." 
- NoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" - InterruptPlacementIterationByContext = "interrupt placement iteration by context" - - Notification = "notification" -) - -const ( - InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" - InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" - InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" - InnerringCantGetInnerRingIndex = "can't get inner ring index" - InnerringCantGetInnerRingSize = "can't get inner ring size" - InnerringCantGetAlphabetIndex = "can't get alphabet index" - InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" - InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" - InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" - InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" - InnerringNotarySupport = "notary support" - InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" - InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" - InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" - InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" - InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" - InnerringNewBlock = "new block" - InnerringCantUpdatePersistentState = "can't update persistent state" - InnerringCloserError = "closer error" - InnerringReadConfigFromBlockchain = "read config from blockchain" - PolicerCouldNotGetContainer = "could not get container" - PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal" - PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" - PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" - PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" - PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" - PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" - PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" - PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" - PolicerRoutineStopped = "routine stopped" - PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" - PolicerPoolSubmission = "pool submission" - PolicerUnableToProcessObj = "unable to process object" - ReplicatorFinishWork = "finish work" - ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" - ReplicatorCouldNotReplicateObject = "could not replicate object" - ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" - TreeRedirectingTreeServiceQuery = "redirecting tree service query" - TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" - TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" - TreeSynchronizeTree = "synchronize tree" - TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" - 
TreeFailedToRunTreeSynchronizationForSpecificNode = "failed to run tree synchronization for specific node" - TreeFailedToParseAddressForTreeSynchronization = "failed to parse address for tree synchronization" - TreeFailedToConnectForTreeSynchronization = "failed to connect for tree synchronization" - TreeSyncingTrees = "syncing trees..." - TreeCouldNotFetchContainers = "could not fetch containers" - TreeTreesHaveBeenSynchronized = "trees have been synchronized" - TreeSyncingContainerTrees = "syncing container trees..." - TreeCouldNotSyncTrees = "could not sync trees" - TreeContainerTreesHaveBeenSynced = "container trees have been synced" - TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" - TreeRemovingRedundantTrees = "removing redundant trees..." - TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed" - TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" - TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" - TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" - TreeDoNotSendUpdateToTheNode = "do not send update to the node" - TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" - TreeErrorDuringReplication = "error during replication" - PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" - PersistentCouldNotDeleteSToken = "could not delete token" - PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" - TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" - DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" - DeleteAssemblingChain = "assembling chain..." - DeleteCollectingChildren = "collecting children..." - DeleteSupplementBySplitID = "supplement by split ID" - DeleteFormingTombstoneStructure = "forming tombstone structure..." - DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." - DeleteFormingSplitInfo = "forming split info..." - DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." - DeleteMembersSuccessfullyCollected = "members successfully collected" - DeleteECObjectReceived = "erasure-coded object received, form tombstone" - GetRemoteCallFailed = "remote call failed" - GetCanNotAssembleTheObject = "can not assemble the object" - GetTryingToAssembleTheObject = "trying to assemble the object..." - GetTryingToAssembleTheECObject = "trying to assemble the ec object..." - GetAssemblingSplittedObject = "assembling splitted object..." - GetAssemblingECObject = "assembling erasure-coded object..." 
- GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed" - GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object" - GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object" - GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object" - GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" - GetAssemblingECObjectCompleted = "assembling erasure-coded object completed" - GetFailedToAssembleSplittedObject = "failed to assemble splitted object" - GetFailedToAssembleECObject = "failed to assemble erasure-coded object" - GetCouldNotGenerateContainerTraverser = "could not generate container traverser" - GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" - GetCouldNotWriteHeader = "could not write header" - GetCouldNotWritePayloadChunk = "could not write payload chunk" - GetLocalGetFailed = "local get failed" - GetReturnResultDirectly = "return result directly" - GetCompletingTheOperation = "completing the operation" - GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" - GetRequestedObjectIsVirtual = "requested object is virtual" - GetRequestedObjectIsEC = "requested object is erasure-coded" - GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" - GetUnexpectedECObject = "failed to get EC object from node: expected EC info, but got full object" - PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" - SearchReturnResultDirectly = "return result directly" - SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" - SearchRemoteOperationFailed = "remote operation failed" - SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" - SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" - SearchLocalOperationFailed = "local operation failed" - UtilObjectServiceError = "object service error" - V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" - V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" - ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" - ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" - ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" - ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" - ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" - ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" - ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" - ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" - ClientNotaryDepositInvoke = "notary deposit invoke" - ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" - ClientNotaryRequestInvoked = "notary request invoked" - ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted" - ClientNeoClientInvoke = "neo client invoke" - ClientNativeGasTransferInvoke = "native gas transfer invoke" - ClientBatchGasTransferInvoke = "batch gas 
transfer invoke" - ClientCantGetBlockchainHeight = "can't get blockchain height" - ClientCantGetBlockchainHeight243 = "can't get blockchain height" - EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" - EventStopEventListenerByError = "stop event listener by error" - EventStopEventListenerByContext = "stop event listener by context" - EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" - EventNilNotificationEventWasCaught = "nil notification event was caught" - EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" - EventNilNotaryEventWasCaught = "nil notary event was caught" - EventStopEventListenerByBlockChannel = "stop event listener by block channel" - EventNilBlockWasCaught = "nil block was caught" - EventListenerWorkerPoolDrained = "listener worker pool drained" - EventEventParserNotSet = "event parser not set" - EventCouldNotParseNotificationEvent = "could not parse notification event" - EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" - EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" - EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" - EventNotaryParserNotSet = "notary parser not set" - EventCouldNotParseNotaryEvent = "could not parse notary event" - EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" - EventRegisteredNewEventParser = "registered new event parser" - EventRegisteredNewEventHandler = "registered new event handler" - EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" - StorageOperation = "local object storage operation" - BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" - BlobovniczaOpeningBoltDB = "opening BoltDB" - BlobovniczaInitializing = "initializing..." - BlobovniczaAlreadyInitialized = "already initialized" - BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" - BlobovniczaClosingBoltDB = "closing BoltDB" - BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" - BlobstorOpening = "opening..." - BlobstorInitializing = "initializing..." - BlobstorClosing = "closing..." 
- BlobstorCouldntCloseStorage = "couldn't close storage" - BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" - BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" - EngineShardHasBeenRemoved = "shard has been removed" - EngineCouldNotCloseRemovedShard = "could not close removed shard" - EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" - EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" - EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" - EngineCouldNotCloseShard = "could not close shard" - EngineCouldNotReloadAShard = "could not reload a shard" - EngineAddedNewShard = "added new shard" - EngineCouldNotPutObjectToShard = "could not put object to shard" - EngineCouldNotCheckObjectExistence = "could not check object existence when put object to shard" - EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" - EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" - EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" - EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" - EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" - EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" - EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" - EngineInterruptGettingLockers = "can't get object's lockers" - EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" - EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" - EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" - EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" - EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" - EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" - EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" - EngineStartedShardsEvacuation = "started shards evacuation" - EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully" - EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error" - EngineObjectIsMovedToAnotherShard = "object is moved to another shard" - MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" - MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" - MetabaseCheckingMetabaseVersion = "checking metabase version" - ShardCantSelectAllObjects = "can't select all objects" - ShardSettingShardMode = "setting shard mode" - ShardShardModeSetSuccessfully = "shard mode set successfully" - ShardFetchingObjectWithoutMeta = "fetching object without meta" - ShardObjectIsMissingInWritecache = "object is missing in write-cache" - ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" - ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" - ShardMetaObjectCounterRead = "meta: object counter read" - ShardMetaCantReadContainerList = "meta: can't read container list" 
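-	// Hedged sketch of the flow behind the Engine* error-threshold messages
-	// above; the counter, threshold, and method names are assumed:
-	//
-	//	if sh.errorCount.Add(1) >= e.errorThreshold {
-	//		if err := sh.SetMode(mode.DegradedReadOnly); err != nil {
-	//			log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Error(err))
-	//		}
-	//	}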
-	ShardMetaCantReadContainerSize = "meta: can't read container size"
-	ShardMetaInfoPresentButObjectNotFound = "meta info was present, but the object is missing"
-	ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
-	ShardCantMoveShardToReadonlySwitchMode = "can't move shard to read-only, switch mode"
-	ShardCouldNotUnmarshalObject = "could not unmarshal object"
-	ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
-	ShardCouldNotCloseShardComponent = "could not close shard component"
-	ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
-	ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
-	ShardStopEventListenerByClosedEventChannel = "stop event listener by closed `event` channel"
-	ShardStopEventListenerByClosedStopChannel = "stop event listener by closed `stop` channel"
-	ShardEventProcessingInProgress = "event processing is in progress, skip the received event"
-	ShardStopEventListenerByContext = "stop event listener by context"
-	ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool"
-	ShardGCIsStopped = "GC is stopped"
-	ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..."
-	ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed"
-	ShardCouldNotDeleteTheObjects = "could not delete the objects"
-	ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed"
-	ShardCouldNotInhumeTheObjects = "could not inhume the objects"
-	ShardStartedExpiredTombstonesHandling = "started expired tombstones handling"
-	ShardIteratingTombstones = "iterating tombstones"
-	ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones"
-	ShardIteratorOverGraveyardFailed = "iterator over graveyard failed"
-	ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch"
-	ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling"
-	ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed"
-	ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage"
-	ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records"
-	ShardFailureToUnlockObjects = "failure to unlock objects"
-	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
-	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
-	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
-	ShardCouldNotFindObject = "could not find object"
-	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
-	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
-	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
-	BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
-	BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level"
-	BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza"
-	BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed"
-	BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza"
-	BlobovniczatreeInitializingBlobovniczas = "initializing blobovniczas"
-	BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..."
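-	// Hedged sketch of the guard implied by the message above (receiver and
-	// field names assumed):
-	//
-	//	if b.readOnly {
-	//		b.log.Debug(ctx, logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
-	//		return nil
-	//	}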
- BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." - AlphabetTick = "tick" - AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" - AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" - AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" - AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" - AlphabetStorageNodeEmissionIsOff = "storage node emission is off" - AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" - AlphabetGasEmission = "gas emission" - AlphabetCantParseNodePublicKey = "can't parse node public key" - AlphabetCantTransferGas = "can't transfer gas" - AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" - AlphabetAlphabetWorkerPool = "alphabet worker pool" - BalanceBalanceWorkerPoolDrained = "balance worker pool drained" - BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" - BalanceCantSendLockAssetTx = "can't send lock asset tx" - BalanceBalanceWorkerPool = "balance worker pool" - ContainerContainerWorkerPool = "container worker pool" - ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" - ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" - ContainerPutContainerCheckFailed = "put container check failed" - ContainerCouldNotApprovePutContainer = "could not approve put container" - ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" - ContainerDeleteContainerCheckFailed = "delete container check failed" - ContainerCouldNotApproveDeleteContainer = "could not approve delete container" - FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" - FrostFSCantRelaySetConfigEvent = "can't relay set config event" - FrostFSFrostfsWorkerPool = "frostfs worker pool" - FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" - FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" - FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" - FrostFSDoubleMintEmissionDeclined = "double mint emission declined" - FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" - FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" - FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" - FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" - FrostFSCantCreateLockAccount = "can't create lock account" - FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" - FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" - FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" - GovernanceNewEvent = "new event" - GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" - GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" - GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" - GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" - GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" - GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no 
governance update, alphabet list has not been changed" - GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" - GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" - GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" - GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" - GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" - GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" - GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" - GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" - GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" - NetmapNetmapWorkerPool = "netmap worker pool" - NetmapTick = "tick" - NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" - NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" - NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" - NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" - NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" - NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" - NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" - NetmapCantGetEpochDuration = "can't get epoch duration" - NetmapCantGetTransactionHeight = "can't get transaction height" - NetmapCantResetEpochTimer = "can't reset epoch timer" - NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" - NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" - NetmapNextEpoch = "next epoch" - NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" - NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" - NetmapNonhaltNotaryTransaction = "non-halt notary transaction" - NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" - NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" - NetmapApprovingNetworkMapCandidate = "approving network map candidate" - NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" - NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" - NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" - NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" - FrostFSIRInternalError = "internal error" - FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" - FrostFSIRApplicationStopped = "application stopped" - FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" - FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" - FrostFSIRReloadExtraWallets = "reload extra wallets" - FrostFSNodeStartListeningEndpoint = "start listening endpoint" - FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" - FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" - FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" - FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." 
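-	// Hedged sketch of the stop sequence behind the gRPC shutdown messages
-	// here: a time-boxed GracefulStop with a hard Stop fallback (the timeout
-	// variable is assumed):
-	//
-	//	done := make(chan struct{})
-	//	go func() { srv.GracefulStop(); close(done) }()
-	//	select {
-	//	case <-done:
-	//	case <-time.After(stopTimeout):
-	//		log.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
-	//		srv.Stop()
-	//	}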
-	FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shut down gracefully, forcing stop"
-	FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
-	FrostFSNodeGRPCServerError = "gRPC server error"
-	FrostFSNodeGRPCReconnecting = "reconnecting gRPC server..."
-	FrostFSNodeGRPCReconnectedSuccessfully = "gRPC server reconnected successfully"
-	FrostFSNodeGRPCServerConfigNotFound = "gRPC server config not found"
-	FrostFSNodeGRPCReconnectFailed = "failed to reconnect gRPC server"
-	FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
-	FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
-	FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
-	FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
-	FrostFSNodeShardAttachedToEngine = "shard attached to engine"
-	FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
-	FrostFSNodeAccessPolicyEngineClosingFailure = "APE closing failure"
-	FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
-	FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed = "persistent rule storage db path is not set: in-memory will be used"
-	FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
-	FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
-	FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
-	FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
-	FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
-	FrostFSNodeInternalApplicationError = "internal application error"
-	FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
-	FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
-	FrostFSNodeSIGHUPSkip = "node is not ready for reconfiguration, skipped SIGHUP"
-	FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
-	FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
-	FrostFSNodeConfigurationReading = "configuration reading"
-	FrostFSNodeTracingConfigurationUpdated = "tracing configuration updated"
-	FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
-	FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
-	FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
-	FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
-	FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
-	FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
-	FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
-	FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
-	FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume redundant copy (mark as garbage)"
-	FrostFSNodeFailedInitTracing = "failed to init tracing"
-	FrostFSNodeFailedShutdownTracing = "failed to shut down tracing"
-	FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
-	FrostFSNodeClosingMorphComponents = "closing morph components..."
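-	// Hedged sketch of the SIGHUP reload loop described by the messages above;
-	// reloadConfig is an assumed helper, not an API shown in this diff:
-	//
-	//	ch := make(chan os.Signal, 1)
-	//	signal.Notify(ch, syscall.SIGHUP)
-	//	for range ch {
-	//		log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
-	//		if err := reloadConfig(); err != nil {
-	//			log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
-	//			continue
-	//		}
-	//		log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
-	//	}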
-	FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
-	FrostFSNodeNotarySupport = "notary support"
-	FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
-	FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
-	FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
-	FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
-	FrostFSNodeNewBlock = "new block"
-	FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
-	FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
-	FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
-	FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
-	FrostFSNodeInitialNetworkState = "initial network state"
-	FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
-	FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
-	FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
-	FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
-	FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
-	FrostFSNodePolicerIsDisabled = "policer is disabled"
-	CommonApplicationStarted = "application started"
-	ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
-	ShardGCCollectingExpiredObjectsCompleted = "collecting expired objects completed"
-	ShardGCCollectingExpiredLocksStarted = "collecting expired locks started"
-	ShardGCCollectingExpiredLocksCompleted = "collecting expired locks completed"
-	ShardGCRemoveGarbageStarted = "garbage removal started"
-	ShardGCRemoveGarbageCompleted = "garbage removal completed"
-	EngineShardsEvacuationFailedToCount = "failed to get total objects count to evacuate"
-	EngineShardsEvacuationFailedToListObjects = "failed to list objects to evacuate"
-	EngineShardsEvacuationFailedToReadObject = "failed to read object to evacuate"
-	EngineShardsEvacuationFailedToMoveObject = "failed to evacuate object to other node"
-	ShardGCFailedToGetExpiredWithLinked = "failed to get expired objects with linked"
-	FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap = "the node is under maintenance, skip initial bootstrap"
-	EngineCouldNotChangeShardModeToDisabled = "could not change shard mode to disabled"
-	RPCConnectionLost = "RPC connection lost, attempting reconnect"
-	RPCNodeSwitchFailure = "can't switch RPC node"
-	FSTreeCantUnmarshalObject = "can't unmarshal an object"
-	FSTreeCantFlushObjectBlobstor = "can't flush an object to blobstor"
-	FSTreeCantUpdateID = "can't update object storage ID"
-	PutSingleRedirectFailure = "failed to redirect PutSingle request"
-	StorageIDRetrievalFailure = "can't get storage ID from metabase"
-	ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
-	CandidateStatusPriority = "candidate status is different from the netmap status, the former takes priority"
-	TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
-	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
-	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
-	AttemptToCloseAlreadyClosedBlobovnicza = "attempt to close an already
closed blobovnicza" - FailedToGetContainerCounters = "failed to get container counters values" - FailedToRebuildBlobstore = "failed to rebuild blobstore" - BlobstoreRebuildStarted = "blobstore rebuild started" - BlobstoreRebuildCompletedSuccessfully = "blobstore rebuild completed successfully" - BlobstoreRebuildStopped = "blobstore rebuild stopped" - BlobstorRebuildFailedToRebuildStorages = "failed to rebuild storages" - BlobstorRebuildRebuildStoragesCompleted = "storages rebuild completed" - BlobovniczaTreeCollectingDBToRebuild = "collecting blobovniczas to rebuild..." - BlobovniczaTreeCollectingDBToRebuildFailed = "collecting blobovniczas to rebuild failed" - BlobovniczaTreeCollectingDBToRebuildSuccess = "collecting blobovniczas to rebuild completed successfully" - BlobovniczaTreeRebuildingBlobovnicza = "rebuilding blobovnicza..." - BlobovniczaTreeRebuildingBlobovniczaFailed = "rebuilding blobovnicza failed" - BlobovniczaTreeRebuildingBlobovniczaSuccess = "rebuilding blobovnicza completed successfully" - BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza = "could not put move info to source blobovnicza" - BlobovniczatreeCouldNotUpdateStorageID = "could not update storage ID" - BlobovniczatreeCouldNotDropMoveInfo = "could not drop move info from source blobovnicza" - BlobovniczatreeCouldNotDeleteFromSource = "could not delete object from source blobovnicza" - BlobovniczaTreeCompletingPreviousRebuild = "completing previous rebuild if failed..." - BlobovniczaTreeCompletedPreviousRebuildSuccess = "previous rebuild completed successfully" - BlobovniczaTreeCompletedPreviousRebuildFailed = "failed to complete previous rebuild" - BlobovniczatreeCouldNotCheckExistenceInTargetDB = "could not check object existence in target blobovnicza" - BlobovniczatreeCouldNotPutObjectToTargetDB = "could not put object to target blobovnicza" - BlobovniczaSavingCountersToMeta = "saving counters to blobovnicza's meta..." 
- BlobovniczaSavingCountersToMetaSuccess = "saving counters to blobovnicza's meta completed successfully" - BlobovniczaSavingCountersToMetaFailed = "saving counters to blobovnicza's meta failed" - ObjectRemovalFailureExistsInWritecache = "can't remove object: object must be flushed from writecache" - FailedToReportStatusToSystemd = "failed to report status to systemd" - ShardGCCollectingExpiredMetricsStarted = "collecting expired metrics started" - ShardGCCollectingExpiredMetricsCompleted = "collecting expired metrics completed" - ShardGCFailedToCollectZeroSizeContainers = "failed to collect zero-size containers" - ShardGCFailedToCollectZeroCountContainers = "failed to collect zero-count containers" - EngineFailedToCheckContainerAvailability = "failed to check container availability" - EngineFailedToGetContainerSize = "failed to get container size" - EngineFailedToDeleteContainerSize = "failed to delete container size" - EngineInterruptProcessingZeroSizeContainers = "interrupt processing zero-size containers" - EngineInterruptProcessingZeroCountContainers = "interrupt processing zero-count containers" - EngineFailedToGetContainerCounters = "failed to get container counters" - GetSvcV2FailedToParseNodeEndpoints = "failed to parse node endpoints" - GetSvcV2FailedToParseNodeExternalAddresses = "failed to parse node external addresses" - GetSvcV2FailedToGetRangeHashFromNode = "failed to get range hash from node" - GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes = "failed to get range hash from all of container nodes" - FailedToUpdateShardID = "failed to update shard id" - EngineShardsEvacuationFailedToMoveTree = "failed to evacuate tree to other node" - EngineShardsEvacuationTreeEvacuatedLocal = "tree evacuated to local node" - EngineShardsEvacuationTreeEvacuatedRemote = "tree evacuated to other node" - EngineRefillFailedToGetObjectsCount = "failed to get blobstor objects count, no resync percent estimation is available" - ECFailedToSendToContainerNode = "failed to send EC object to container node" - ECFailedToSaveECPart = "failed to save EC part" - PolicerNodeIsNotECObjectNode = "current node is not EC object node" - PolicerFailedToGetLocalECChunks = "failed to get local EC chunks" - PolicerMissingECChunk = "failed to find EC chunk on any of the nodes" - PolicerFailedToDecodeECChunkID = "failed to decode EC chunk ID" - PolicerDifferentObjectIDForTheSameECChunk = "different object IDs for the same EC chunk" - ReplicatorCouldNotGetObjectFromRemoteStorage = "could not get object from remote storage" - ReplicatorCouldNotPutObjectToLocalStorage = "could not put object to local storage" - PolicerCouldNotGetObjectFromNodeMoving = "could not get EC object from the node, moving current chunk to the node" - PolicerCouldNotRestoreObjectNotEnoughChunks = "could not restore EC object: not enough chunks" - PolicerFailedToRestoreObject = "failed to restore EC object" - PolicerCouldNotGetChunk = "could not get EC chunk" - PolicerCouldNotGetChunks = "could not get EC chunks" - AuditEventLogRecord = "audit event log record" - StartedWritecacheSealAsync = "started writecache seal async" - WritecacheSealCompletedAsync = "writecache seal completed successfully" - FailedToSealWritecacheAsync = "failed to seal writecache async" - WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty" - BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" - WritecacheCantGetObject = "can't get an object from fstree" - FailedToUpdateMultinetConfiguration = "failed to update 
multinet configuration" - FailedToParseIncomingIOTag = "failed to parse incoming IO tag" - NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" - FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" - FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" - WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" - FailedToUpdateNetmapCandidates = "update netmap candidates failed" - UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" -) diff --git a/internal/metrics/application.go b/internal/metrics/application.go deleted file mode 100644 index 53acf9b7f..000000000 --- a/internal/metrics/application.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type ApplicationInfo struct { - versionValue *prometheus.GaugeVec -} - -func NewApplicationInfo(version string) *ApplicationInfo { - appInfo := &ApplicationInfo{ - versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "app_info", - Help: "General information about the application.", - }, []string{"version"}), - } - appInfo.versionValue.With(prometheus.Labels{"version": version}) - return appInfo -} diff --git a/internal/metrics/blobovnicza.go b/internal/metrics/blobovnicza.go deleted file mode 100644 index 948272c88..000000000 --- a/internal/metrics/blobovnicza.go +++ /dev/null @@ -1,213 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type BlobobvnizcaMetrics interface { - SetBlobobvnizcaTreeMode(shardID, path string, mode mode.ComponentMode) - CloseBlobobvnizcaTree(shardID, path string) - BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) - AddBlobobvnizcaTreePut(shardID, path string, size int) - AddBlobobvnizcaTreeGet(shardID, path string, size int) - - AddOpenBlobovniczaSize(shardID, path string, size uint64) - SubOpenBlobovniczaSize(shardID, path string, size uint64) - - AddOpenBlobovniczaItems(shardID, path string, items uint64) - SubOpenBlobovniczaItems(shardID, path string, items uint64) - - IncOpenBlobovniczaCount(shardID, path string) - DecOpenBlobovniczaCount(shardID, path string) - - BlobovniczaTreeRebuildStatus(shardID, path, status string) - BlobovniczaTreeRebuildPercent(shardID, path string, value uint32) - BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration) -} - -type blobovnicza struct { - treeMode *shardIDPathModeValue - treeReqDuration *prometheus.HistogramVec - treePut *prometheus.CounterVec - treeGet *prometheus.CounterVec - treeOpenSize *prometheus.GaugeVec - treeOpenItems *prometheus.GaugeVec - treeOpenCounter *prometheus.GaugeVec - treeObjectMoveDuration *prometheus.HistogramVec - treeRebuildStatus *shardIDPathModeValue - treeRebuildPercent *prometheus.GaugeVec -} - -func newBlobovnicza() *blobovnicza { - return &blobovnicza{ - treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnicza tree mode"), - - treeReqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: 
"request_duration_seconds", - Help: "Accumulated Blobovnicza tree request process duration", - }, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}), - treePut: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "put_bytes", - Help: "Accumulated payload size written to Blobovnicza tree", - }, []string{shardIDLabel, pathLabel}), - treeGet: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "get_bytes", - Help: "Accumulated payload size read from Blobovnicza tree", - }, []string{shardIDLabel, pathLabel}), - treeOpenSize: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "open_blobovnicza_size_bytes", - Help: "Size of opened blobovniczas of Blobovnicza tree", - }, []string{shardIDLabel, pathLabel}), - treeOpenItems: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "open_blobovnicza_items_total", - Help: "Count of items in opened blobovniczas of Blobovnicza tree", - }, []string{shardIDLabel, pathLabel}), - treeOpenCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "open_blobovnicza_count", - Help: "Count of opened blobovniczas of Blobovnicza tree", - }, []string{shardIDLabel, pathLabel}), - treeObjectMoveDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "object_move_duration_seconds", - Help: "Accumulated Blobovnicza tree object move duration", - }, []string{shardIDLabel, pathLabel}), - treeRebuildStatus: newShardIDPathMode(blobovniczaTreeSubSystem, "rebuild_status", "Blobovnicza tree rebuild status"), - treeRebuildPercent: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: blobovniczaTreeSubSystem, - Name: "rebuild_complete_percent", - Help: "Percent of rebuild completeness", - }, []string{shardIDLabel, pathLabel}), - } -} - -func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, mod mode.ComponentMode) { - b.treeMode.SetMode(shardID, path, mod.String()) -} - -func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) { - b.treeMode.SetMode(shardID, path, closedMode) - b.treeReqDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - b.treeGet.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - b.treePut.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - b.treeObjectMoveDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - b.treeRebuildPercent.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - b.treeRebuildStatus.SetMode(shardID, path, undefinedStatus) -} - -func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) { - b.treeReqDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - successLabel: strconv.FormatBool(success), - methodLabel: method, - withStorageIDLabel: withStorageID.String(), - }).Observe(d.Seconds()) -} - -func (b *blobovnicza) AddBlobobvnizcaTreePut(shardID, path string, size int) { - b.treePut.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(size)) -} - 
-func (b *blobovnicza) AddBlobobvnizcaTreeGet(shardID, path string, size int) { - b.treeGet.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(size)) -} - -func (b *blobovnicza) AddOpenBlobovniczaSize(shardID, path string, size uint64) { - b.treeOpenSize.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(size)) -} - -func (b *blobovnicza) SubOpenBlobovniczaSize(shardID, path string, size uint64) { - b.treeOpenSize.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Sub(float64(size)) -} - -func (b *blobovnicza) IncOpenBlobovniczaCount(shardID, path string) { - b.treeOpenCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Inc() -} - -func (b *blobovnicza) DecOpenBlobovniczaCount(shardID, path string) { - b.treeOpenCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Dec() -} - -func (b *blobovnicza) AddOpenBlobovniczaItems(shardID, path string, items uint64) { - b.treeOpenItems.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(items)) -} - -func (b *blobovnicza) SubOpenBlobovniczaItems(shardID, path string, items uint64) { - b.treeOpenItems.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Sub(float64(items)) -} - -func (b *blobovnicza) BlobovniczaTreeRebuildStatus(shardID, path, status string) { - b.treeRebuildStatus.SetMode(shardID, path, status) -} - -func (b *blobovnicza) BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration) { - b.treeObjectMoveDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Observe(d.Seconds()) -} - -func (b *blobovnicza) BlobovniczaTreeRebuildPercent(shardID, path string, value uint32) { - b.treeRebuildPercent.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Set(float64(value)) -} diff --git a/internal/metrics/blobstore.go b/internal/metrics/blobstore.go deleted file mode 100644 index d9bb3f029..000000000 --- a/internal/metrics/blobstore.go +++ /dev/null @@ -1,87 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type BlobstoreMetrics interface { - SetMode(shardID string, readOnly bool) - Close(shardID string) - - MethodDuration(shardID string, method string, d time.Duration, success bool, withStorageID NullBool) - AddPut(shardID string, size int) - AddGet(shardID string, size int) -} - -type blobstoreMetrics struct { - mode *shardIDModeValue - reqDuration *prometheus.HistogramVec - put *prometheus.CounterVec - get *prometheus.CounterVec -} - -func newBlobstoreMetrics() *blobstoreMetrics { - return &blobstoreMetrics{ - mode: newShardIDMode(blobstoreSubSystem, "mode", "Blobstore mode value"), - reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: blobstoreSubSystem, - Name: "request_duration_seconds", - Help: "Accumulated Blobstore request process duration", - }, []string{shardIDLabel, successLabel, methodLabel, withStorageIDLabel}), - put: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: blobstoreSubSystem, - Name: "put_bytes", - Help: "Accumulated payload size written to Blobstore", - }, []string{shardIDLabel}), - get: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: blobstoreSubSystem, - Name: "get_bytes", - Help: "Accumulated payload size read from 
Blobstore", - }, []string{shardIDLabel}), - } -} - -func (m *blobstoreMetrics) SetMode(shardID string, readOnly bool) { - m.mode.SetMode(shardID, modeFromBool(readOnly)) -} - -func (m *blobstoreMetrics) Close(shardID string) { - m.mode.SetMode(shardID, closedMode) - m.reqDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) - m.get.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) - m.put.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) -} - -func (m *blobstoreMetrics) MethodDuration(shardID string, method string, d time.Duration, success bool, withStorageID NullBool) { - m.reqDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - successLabel: strconv.FormatBool(success), - methodLabel: method, - withStorageIDLabel: withStorageID.String(), - }).Observe(d.Seconds()) -} - -func (m *blobstoreMetrics) AddPut(shardID string, size int) { - m.put.With(prometheus.Labels{ - shardIDLabel: shardID, - }).Add(float64(size)) -} - -func (m *blobstoreMetrics) AddGet(shardID string, size int) { - m.get.With(prometheus.Labels{ - shardIDLabel: shardID, - }).Add(float64(size)) -} diff --git a/internal/metrics/cache.go b/internal/metrics/cache.go deleted file mode 100644 index 8181586e2..000000000 --- a/internal/metrics/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -var cacheRequests = metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: commonCacheSubsystem, - Name: "request_duration_seconds", - Help: "Accumulated common cache request process duration", -}, []string{hitLabel, methodLabel, cacheLabel}) - -type CacheMetrics struct { - cache string -} - -// NewCacheMetrics returns new CacheMetrics instance for cache specified. 
-func NewCacheMetrics(cache string) *CacheMetrics { - return &CacheMetrics{ - cache: cache, - } -} - -func (m *CacheMetrics) AddMethodDuration(method string, d time.Duration, hit bool) { - cacheRequests.With(prometheus.Labels{ - hitLabel: strconv.FormatBool(hit), - methodLabel: method, - cacheLabel: m.cache, - }).Observe(d.Seconds()) -} diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go deleted file mode 100644 index 9123541ff..000000000 --- a/internal/metrics/consts.go +++ /dev/null @@ -1,56 +0,0 @@ -package metrics - -const ( - namespace = "frostfs_node" - innerRingNamespace = "frostfs_ir" - - fstreeSubSystem = "fstree" - blobstoreSubSystem = "blobstore" - blobovniczaTreeSubSystem = "blobovnicza_tree" - metabaseSubSystem = "metabase" - piloramaSubSystem = "pilorama" - engineSubsystem = "engine" - gcSubsystem = "garbage_collector" - innerRingSubsystem = "ir" - morphSubsystem = "morph" - morphCacheSubsystem = "morphcache" - objectSubsystem = "object" - replicatorSubsystem = "replicator" - stateSubsystem = "state" - treeServiceSubsystem = "treeservice" - writeCacheSubsystem = "writecache" - grpcServerSubsystem = "grpc_server" - policerSubsystem = "policer" - commonCacheSubsystem = "common_cache" - multinetSubsystem = "multinet" - qosSubsystem = "qos" - - successLabel = "success" - shardIDLabel = "shard_id" - modeLabel = "mode" - pathLabel = "path" - methodLabel = "method" - withStorageIDLabel = "with_storage_id" - statusLabel = "status" - objectTypeLabel = "object_type" - typeLabel = "type" - notificationTypeLabel = "notification_type" - invokeTypeLabel = "invoke_type" - contractLabel = "contract" - containerIDLabelKey = "cid" - storageLabel = "storage" - operationLabel = "operation" - endpointLabel = "endpoint" - hitLabel = "hit" - cacheLabel = "cache" - sourceIPLabel = "source_ip" - ioTagLabel = "io_tag" - - readWriteMode = "READ_WRITE" - readOnlyMode = "READ_ONLY" - closedMode = "CLOSED" - - failedToDeleteStatus = "failed_to_delete" - deletedStatus = "deleted" - undefinedStatus = "undefined" -) diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go deleted file mode 100644 index 1d01c95ed..000000000 --- a/internal/metrics/engine.go +++ /dev/null @@ -1,223 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type EngineMetrics interface { - AddMethodDuration(method string, d time.Duration) - AddToContainerSize(cnrID string, size int64) - DeleteContainerSize(cnrID string) - DeleteContainerCount(cnrID string) - IncErrorCounter(shardID string) - ClearErrorCounter(shardID string) - DeleteShardMetrics(shardID string) - AddToObjectCounter(shardID, objectType string, delta int) - SetObjectCounter(shardID, objectType string, v uint64) - AddToPayloadCounter(shardID string, size int64) - SetMode(shardID string, mode mode.Mode) - SetContainerObjectCounter(shardID, contID, objectType string, v uint64) - IncContainerObjectCounter(shardID, contID, objectType string) - SubContainerObjectCounter(shardID, contID, objectType string, v uint64) - IncRefillObjectsCount(shardID, path string, size int, success bool) - SetRefillPercent(shardID, path string, percent uint32) - SetRefillStatus(shardID, path, status string) - SetEvacuationInProgress(shardID string, value bool) - - WriteCache() WriteCacheMetrics - GC() GCMetrics -} - -type engineMetrics struct { - methodDuration 
*prometheus.HistogramVec
-	objectCounter *prometheus.GaugeVec
-	containerSize *prometheus.GaugeVec
-	payloadSize *prometheus.GaugeVec
-	errorCounter *prometheus.GaugeVec
-	mode *shardIDModeValue
-	contObjCounter *prometheus.GaugeVec
-
-	refillStatus *shardIDPathModeValue
-	refillObjCounter *prometheus.GaugeVec
-	refillPayloadCounter *prometheus.GaugeVec
-	refillPercentCounter *prometheus.GaugeVec
-	evacuationInProgress *shardIDModeValue
-
-	gc *gcMetrics
-	writeCache *writeCacheMetrics
-}
-
-func newEngineMetrics() *engineMetrics {
-	return &engineMetrics{
-		containerSize: newEngineGaugeVector("container_size_bytes", "Accumulated size of all objects in a container", []string{containerIDLabelKey}),
-		payloadSize: newEngineGaugeVector("payload_size_bytes", "Accumulated size of all objects in a shard", []string{shardIDLabel}),
-		errorCounter: newEngineGaugeVector("errors_total", "Shard's error counter", []string{shardIDLabel}),
-		methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
-			Namespace: namespace,
-			Subsystem: engineSubsystem,
-			Name: "request_duration_seconds",
-			Help: "Duration of Engine requests",
-		}, []string{methodLabel}),
-		objectCounter: newEngineGaugeVector("objects_total",
-			"Object counters per shard. DEPRECATED: Will be deleted in next releases, use frostfs_node_engine_container_objects_total metric.",
-			[]string{shardIDLabel, typeLabel}),
-		gc: newGCMetrics(),
-		writeCache: newWriteCacheMetrics(),
-		mode: newShardIDMode(engineSubsystem, "mode_info", "Shard mode"),
-		contObjCounter: newEngineGaugeVector("container_objects_total", "Count of objects for each container", []string{shardIDLabel, containerIDLabelKey, typeLabel}),
-		refillStatus: newShardIDPathMode(engineSubsystem, "resync_metabase_status", "Resync from blobstore to metabase status"),
-		refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
-		refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
-		refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Completeness percent of resync from blobstore to metabase", []string{shardIDLabel, pathLabel}),
-		evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"),
-	}
-}
-
-func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
-	return metrics.NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: namespace,
-		Subsystem: engineSubsystem,
-		Name: name,
-		Help: help,
-	}, labels)
-}
-
-func (m *engineMetrics) AddMethodDuration(method string, d time.Duration) {
-	m.methodDuration.With(prometheus.Labels{
-		methodLabel: method,
-	}).Observe(d.Seconds())
-}
-
-func (m *engineMetrics) AddToContainerSize(cnrID string, size int64) {
-	m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size))
-}
-
-func (m *engineMetrics) DeleteContainerSize(cnrID string) {
-	m.containerSize.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
-}
-
-func (m *engineMetrics) DeleteContainerCount(cnrID string) {
-	m.contObjCounter.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
-}
-
-func (m *engineMetrics) AddToPayloadCounter(shardID string, size int64) {
-	m.payloadSize.With(prometheus.Labels{shardIDLabel: shardID}).Add(float64(size))
-}
-
-func (m *engineMetrics)
IncErrorCounter(shardID string) { - m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Inc() -} - -func (m *engineMetrics) ClearErrorCounter(shardID string) { - m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Set(0) -} - -func (m *engineMetrics) DeleteShardMetrics(shardID string) { - m.errorCounter.Delete(prometheus.Labels{shardIDLabel: shardID}) - m.payloadSize.Delete(prometheus.Labels{shardIDLabel: shardID}) - m.objectCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) - m.contObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) - m.refillObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) - m.refillPayloadCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) - m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) - m.mode.Delete(shardID) - m.refillStatus.DeleteByShardID(shardID) - m.evacuationInProgress.Delete(shardID) -} - -func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) { - m.objectCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - typeLabel: objectType, - }, - ).Add(float64(delta)) -} - -func (m *engineMetrics) SetObjectCounter(shardID, objectType string, v uint64) { - m.objectCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - typeLabel: objectType, - }, - ).Set(float64(v)) -} - -func (m *engineMetrics) SetContainerObjectCounter(shardID, contID, objectType string, v uint64) { - m.contObjCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - containerIDLabelKey: contID, - typeLabel: objectType, - }, - ).Set(float64(v)) -} - -func (m *engineMetrics) IncContainerObjectCounter(shardID, contID, objectType string) { - m.contObjCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - containerIDLabelKey: contID, - typeLabel: objectType, - }, - ).Inc() -} - -func (m *engineMetrics) SubContainerObjectCounter(shardID, contID, objectType string, v uint64) { - m.contObjCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - containerIDLabelKey: contID, - typeLabel: objectType, - }, - ).Sub(float64(v)) -} - -func (m *engineMetrics) SetMode(shardID string, mode mode.Mode) { - m.mode.SetMode(shardID, mode.String()) -} - -func (m *engineMetrics) WriteCache() WriteCacheMetrics { - return m.writeCache -} - -func (m *engineMetrics) GC() GCMetrics { - return m.gc -} - -func (m *engineMetrics) IncRefillObjectsCount(shardID, path string, size int, success bool) { - m.refillObjCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - successLabel: strconv.FormatBool(success), - }, - ).Inc() - m.refillPayloadCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - successLabel: strconv.FormatBool(success), - }, - ).Add(float64(size)) -} - -func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) { - m.refillPercentCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Set(float64(percent)) -} - -func (m *engineMetrics) SetRefillStatus(shardID, path, status string) { - m.refillStatus.SetMode(shardID, path, status) -} - -func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) { - m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value)) -} diff --git a/internal/metrics/fstree.go b/internal/metrics/fstree.go deleted file mode 100644 index ecd4352bb..000000000 --- a/internal/metrics/fstree.go +++ /dev/null @@ -1,93 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type FSTreeMetrics interface { - SetMode(shardID, path string, mode mode.ComponentMode) - Close(shardID, path string) - - MethodDuration(shardID, path string, method string, d time.Duration, success bool) - AddGet(shardID, path string, size int) - AddPut(shardID, path string, size int) -} - -type fstreeMetrics struct { - mode *shardIDPathModeValue - reqDuration *prometheus.HistogramVec - put *prometheus.CounterVec - get *prometheus.CounterVec -} - -func newFSTreeMetrics() *fstreeMetrics { - return &fstreeMetrics{ - mode: newShardIDPathMode(fstreeSubSystem, "mode", "FSTree mode value"), - reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: fstreeSubSystem, - Name: "request_duration_seconds", - Help: "Accumulated FSTree request process duration", - }, []string{shardIDLabel, successLabel, pathLabel, methodLabel}), - put: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: fstreeSubSystem, - Name: "put_bytes", - Help: "Accumulated payload size written to FSTree", - }, []string{shardIDLabel, pathLabel}), - get: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: fstreeSubSystem, - Name: "get_bytes", - Help: "Accumulated payload size read from FSTree", - }, []string{shardIDLabel, pathLabel}), - } -} - -func (m *fstreeMetrics) SetMode(shardID, path string, mod mode.ComponentMode) { - m.mode.SetMode(shardID, path, mod.String()) -} - -func (m *fstreeMetrics) Close(shardID, path string) { - m.mode.SetMode(shardID, path, closedMode) - m.reqDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - m.get.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - m.put.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) -} - -func (m *fstreeMetrics) MethodDuration(shardID, path string, method string, d time.Duration, success bool) { - m.reqDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - successLabel: strconv.FormatBool(success), - methodLabel: method, - }).Observe(d.Seconds()) -} - -func (m *fstreeMetrics) AddGet(shardID, path string, size int) { - m.get.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(size)) -} - -func (m *fstreeMetrics) AddPut(shardID, path string, size int) { - m.put.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }).Add(float64(size)) -} diff --git a/internal/metrics/gc.go b/internal/metrics/gc.go deleted file mode 100644 index 53bfef0e5..000000000 --- a/internal/metrics/gc.go +++ /dev/null @@ -1,88 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type GCMetrics interface { - AddRunDuration(shardID string, d time.Duration, success bool) - AddDeletedCount(shardID string, deleted, failed uint64) - AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string) - AddInhumedObjectCount(shardID string, count uint64, objectType string) -} - -type gcMetrics struct { - runDuration *prometheus.CounterVec - deletedCounter *prometheus.CounterVec - expCollectDuration *prometheus.CounterVec - inhumedCounter *prometheus.CounterVec -} 
- -func newGCMetrics() *gcMetrics { - return &gcMetrics{ - runDuration: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: gcSubsystem, - Name: "delete_duration_seconds", - Help: "The total time of GC runs to delete objects from disk", - }, []string{shardIDLabel, successLabel}), - deletedCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: gcSubsystem, - Name: "deleted_objects_total", - Help: "Total count of objects GC deleted or failed to delete from disk", - }, []string{shardIDLabel, statusLabel}), - expCollectDuration: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: gcSubsystem, - Name: "marking_duration_seconds", - Help: "The total time of GC runs to mark expired objects as removed", - }, []string{shardIDLabel, successLabel, objectTypeLabel}), - inhumedCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: gcSubsystem, - Name: "marked_for_removal_objects_total", - Help: "Total count of expired objects GC marked to remove", - }, []string{shardIDLabel, objectTypeLabel}), - } -} - -func (m *gcMetrics) AddRunDuration(shardID string, d time.Duration, success bool) { - m.runDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - successLabel: strconv.FormatBool(success), - }).Add(d.Seconds()) -} - -func (m *gcMetrics) AddDeletedCount(shardID string, deleted, failed uint64) { - m.deletedCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - statusLabel: deletedStatus, - }).Add(float64(deleted)) - m.deletedCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - statusLabel: failedToDeleteStatus, - }).Add(float64(failed)) -} - -func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string) { - m.expCollectDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - successLabel: strconv.FormatBool(success), - objectTypeLabel: objectType, - }).Add(d.Seconds()) -} - -func (m *gcMetrics) AddInhumedObjectCount(shardID string, count uint64, objectType string) { - m.inhumedCounter.With( - prometheus.Labels{ - shardIDLabel: shardID, - objectTypeLabel: objectType, - }).Add(float64(count)) -} diff --git a/internal/metrics/grpc.go b/internal/metrics/grpc.go deleted file mode 100644 index a83f53998..000000000 --- a/internal/metrics/grpc.go +++ /dev/null @@ -1,35 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type GrpcServerMetrics interface { - MarkHealthy(endpoint string) - MarkUnhealthy(endpoint string) -} - -type grpcServerMetrics struct { - endpointHealth *prometheus.GaugeVec -} - -func newGrpcServerMetrics() *grpcServerMetrics { - return &grpcServerMetrics{ - endpointHealth: metrics.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: grpcServerSubsystem, - Name: "health", - Help: "GRPC Server Endpoint health", - }, []string{endpointLabel}), - } -} - -func (m *grpcServerMetrics) MarkHealthy(endpoint string) { - m.endpointHealth.With(prometheus.Labels{endpointLabel: endpoint}).Set(float64(1)) -} - -func (m *grpcServerMetrics) MarkUnhealthy(endpoint string) { - m.endpointHealth.With(prometheus.Labels{endpointLabel: endpoint}).Set(float64(0)) -} diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go deleted file mode 100644 index d0cb8131f..000000000 --- a/internal/metrics/innerring.go +++ /dev/null @@ -1,86 +0,0 @@ -package metrics - 
-import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -// InnerRingServiceMetrics contains metrics collected by inner ring. -type InnerRingServiceMetrics struct { - epoch prometheus.Gauge - health prometheus.Gauge - eventDuration *prometheus.HistogramVec - morphCacheMetrics *morphCacheMetrics - logMetrics logger.LogMetrics - multinet *multinetMetrics - // nolint: unused - appInfo *ApplicationInfo -} - -// NewInnerRingMetrics returns new instance of metrics collectors for inner ring. -func NewInnerRingMetrics() *InnerRingServiceMetrics { - var ( - epoch = metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: innerRingNamespace, - Subsystem: innerRingSubsystem, - Name: "epoch", - Help: "Current epoch as seen by inner-ring node.", - }) - health = metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: innerRingNamespace, - Subsystem: innerRingSubsystem, - Name: "health", - Help: "Current inner-ring node state.", - }) - eventDuration = metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: innerRingNamespace, - Subsystem: innerRingSubsystem, - Name: "event_duration_seconds", - Help: "Duration of processing of inner-ring events", - }, []string{typeLabel, successLabel}) - ) - - return &InnerRingServiceMetrics{ - epoch: epoch, - health: health, - eventDuration: eventDuration, - morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace), - appInfo: NewApplicationInfo(misc.Version), - logMetrics: logger.NewLogMetrics(innerRingNamespace), - multinet: newMultinetMetrics(innerRingNamespace), - } -} - -// SetEpoch updates epoch metrics. -func (m *InnerRingServiceMetrics) SetEpoch(epoch uint64) { - m.epoch.Set(float64(epoch)) -} - -// SetHealth updates health metrics. 
-func (m *InnerRingServiceMetrics) SetHealth(s int32) { - m.health.Set(float64(s)) -} - -func (m *InnerRingServiceMetrics) AddEvent(d time.Duration, typ string, success bool) { - m.eventDuration.With(prometheus.Labels{ - typeLabel: typ, - successLabel: strconv.FormatBool(success), - }).Observe(d.Seconds()) -} - -func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics { - return m.morphCacheMetrics -} - -func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics { - return m.logMetrics -} - -func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics { - return m.multinet -} diff --git a/internal/metrics/metabase.go b/internal/metrics/metabase.go deleted file mode 100644 index 640c7f721..000000000 --- a/internal/metrics/metabase.go +++ /dev/null @@ -1,54 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type MetabaseMetrics interface { - SetMode(shardID, path string, mode string) - Close(shardID, path string) - - MethodDuration(shardID, path string, method string, d time.Duration, success bool) -} - -func newMetabaseMetrics() *metabaseMetrics { - return &metabaseMetrics{ - mode: newShardIDPathMode(metabaseSubSystem, "mode", "Metabase mode"), - reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: metabaseSubSystem, - Name: "request_duration_seconds", - Help: "Accumulated Metabase request process duration", - }, []string{shardIDLabel, successLabel, pathLabel, methodLabel}), - } -} - -type metabaseMetrics struct { - mode *shardIDPathModeValue - reqDuration *prometheus.HistogramVec -} - -func (m *metabaseMetrics) SetMode(shardID, path string, mode string) { - m.mode.SetMode(shardID, path, mode) -} - -func (m *metabaseMetrics) Close(shardID, path string) { - m.mode.SetMode(shardID, path, closedMode) - m.reqDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) -} - -func (m *metabaseMetrics) MethodDuration(shardID, path string, method string, d time.Duration, success bool) { - m.reqDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - successLabel: strconv.FormatBool(success), - methodLabel: method, - }).Observe(d.Seconds()) -} diff --git a/internal/metrics/mode.go b/internal/metrics/mode.go deleted file mode 100644 index a9ac47acd..000000000 --- a/internal/metrics/mode.go +++ /dev/null @@ -1,89 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type shardIDModeValue struct { - modeValue *prometheus.GaugeVec -} - -func newShardIDMode(subsystem, name, help string) *shardIDModeValue { - return &shardIDModeValue{ - modeValue: metrics.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }, []string{shardIDLabel, modeLabel}), - } -} - -func (m *shardIDModeValue) SetMode(shardID, mode string) { - m.modeValue.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) - - m.modeValue.With(prometheus.Labels{ - shardIDLabel: shardID, - modeLabel: mode, - }).Set(1) -} - -func (m *shardIDModeValue) Delete(shardID string) { - m.modeValue.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) -} - -type shardIDPathModeValue struct { - modeValue *prometheus.GaugeVec -} - -func newShardIDPathMode(subsystem, name, help string) *shardIDPathModeValue { - return 
&shardIDPathModeValue{ - modeValue: metrics.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }, []string{shardIDLabel, pathLabel, modeLabel}), - } -} - -func (m *shardIDPathModeValue) SetMode(shardID, path, mode string) { - m.modeValue.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) - - m.modeValue.With(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - modeLabel: mode, - }).Set(1) -} - -func (m *shardIDPathModeValue) Delete(shardID, path string) { - m.modeValue.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - pathLabel: path, - }) -} - -func (m *shardIDPathModeValue) DeleteByShardID(shardID string) { - m.modeValue.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) -} - -func modeFromBool(readOnly bool) string { - modeValue := readWriteMode - if readOnly { - modeValue = readOnlyMode - } - return modeValue -} diff --git a/internal/metrics/morph.go b/internal/metrics/morph.go deleted file mode 100644 index 02d7517bc..000000000 --- a/internal/metrics/morph.go +++ /dev/null @@ -1,72 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type morphClientMetrics struct { - switchCount prometheus.Counter - lastBlock prometheus.Gauge - notificationCount *prometheus.CounterVec - invokeDuration *prometheus.HistogramVec -} - -func newMorphClientMetrics() *morphClientMetrics { - return &morphClientMetrics{ - switchCount: metrics.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: morphSubsystem, - Name: "switches_total", - Help: "Number of endpoint switches", - }), - lastBlock: metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: morphSubsystem, - Name: "last_block", - Help: "Index of the last received block", - }), - notificationCount: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: morphSubsystem, - Name: "notifications_total", - Help: "Number of notifications received by notification type", - }, []string{notificationTypeLabel}), - invokeDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: morphSubsystem, - Name: "invoke_duration_seconds", - Help: "Cumulative duration of contract invocations", - }, []string{invokeTypeLabel, contractLabel, methodLabel, successLabel}), - } -} - -func (m *morphClientMetrics) IncSwitchCount() { - m.switchCount.Inc() -} - -func (m *morphClientMetrics) SetLastBlock(index uint32) { - m.lastBlock.Set(float64(index)) -} - -func (m *morphClientMetrics) IncNotificationCount(typ string) { - m.notificationCount.With( - prometheus.Labels{ - notificationTypeLabel: typ, - }, - ).Inc() -} - -func (m *morphClientMetrics) ObserveInvoke(typ string, contract string, method string, success bool, d time.Duration) { - m.invokeDuration.With( - prometheus.Labels{ - invokeTypeLabel: typ, - contractLabel: contract, - methodLabel: method, - successLabel: strconv.FormatBool(success), - }, - ).Observe(d.Seconds()) -} diff --git a/internal/metrics/morphcache.go b/internal/metrics/morphcache.go deleted file mode 100644 index 388cb11e8..000000000 --- a/internal/metrics/morphcache.go +++ /dev/null @@ -1,39 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type MorphCacheMetrics
interface { - AddMethodDuration(method string, success bool, d time.Duration) -} - -type morphCacheMetrics struct { - methodDuration *prometheus.HistogramVec -} - -var _ MorphCacheMetrics = (*morphCacheMetrics)(nil) - -func newMorphCacheMetrics(ns string) *morphCacheMetrics { - return &morphCacheMetrics{ - methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: morphCacheSubsystem, - Name: "request_duration_seconds", - Help: "Morph cache request process duration", - }, []string{successLabel, methodLabel}), - } -} - -func (m *morphCacheMetrics) AddMethodDuration(method string, success bool, d time.Duration) { - m.methodDuration.With( - prometheus.Labels{ - successLabel: strconv.FormatBool(success), - methodLabel: method, - }, - ).Observe(d.Seconds()) -} diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go deleted file mode 100644 index 6b1f99d46..000000000 --- a/internal/metrics/multinet.go +++ /dev/null @@ -1,35 +0,0 @@ -package metrics - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type multinetMetrics struct { - dials *prometheus.GaugeVec -} - -type MultinetMetrics interface { - Dial(sourceIP string, success bool) -} - -func newMultinetMetrics(ns string) *multinetMetrics { - return &multinetMetrics{ - dials: metrics.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: multinetSubsystem, - Name: "dial_count", - Help: "Dials count performed by multinet", - }, []string{sourceIPLabel, successLabel}), - } -} - -func (m *multinetMetrics) Dial(sourceIP string, success bool) { - m.dials.With(prometheus.Labels{ - sourceIPLabel: sourceIP, - successLabel: strconv.FormatBool(success), - }).Inc() -} diff --git a/internal/metrics/node.go b/internal/metrics/node.go deleted file mode 100644 index 8ade19eb2..000000000 --- a/internal/metrics/node.go +++ /dev/null @@ -1,134 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/misc" - morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type NodeMetrics struct { - engine *engineMetrics - state *stateMetrics - replicator *replicatorMetrics - objectService *objectServiceMetrics - treeService *treeServiceMetrics - epoch prometheus.Gauge - fstree *fstreeMetrics - blobstore *blobstoreMetrics - blobobvnizca *blobovnicza - metabase *metabaseMetrics - pilorama *piloramaMetrics - grpc *grpcServerMetrics - policer *policerMetrics - morphClient *morphClientMetrics - morphCache *morphCacheMetrics - log logger.LogMetrics - multinet *multinetMetrics - qos *QoSMetrics - // nolint: unused - appInfo *ApplicationInfo -} - -func NewNodeMetrics() *NodeMetrics { - return &NodeMetrics{ - objectService: newObjectServiceMetrics(), - engine: newEngineMetrics(), - state: newStateMetrics(), - replicator: newReplicatorMetrics(), - treeService: newTreeServiceMetrics(), - epoch: metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: innerRingSubsystem, - Name: "epoch", - Help: "Current epoch as seen by inner-ring node.", - }), - fstree: newFSTreeMetrics(), - blobstore: newBlobstoreMetrics(), - blobobvnizca: newBlobovnicza(), - metabase: newMetabaseMetrics(), - pilorama: newPiloramaMetrics(), - grpc: newGrpcServerMetrics(), - policer: newPolicerMetrics(), - 
morphClient: newMorphClientMetrics(), - morphCache: newMorphCacheMetrics(namespace), - log: logger.NewLogMetrics(namespace), - appInfo: NewApplicationInfo(misc.Version), - multinet: newMultinetMetrics(namespace), - qos: newQoSMetrics(), - } -} - -// SetEpoch updates epoch metric. -func (m *NodeMetrics) SetEpoch(epoch uint64) { - m.epoch.Set(float64(epoch)) -} - -func (m *NodeMetrics) TreeService() TreeMetricsRegister { - return m.treeService -} - -func (m *NodeMetrics) Replicator() ReplicatorMetrics { - return m.replicator -} - -func (m *NodeMetrics) ObjectService() ObjectServiceMetrics { - return m.objectService -} - -func (m *NodeMetrics) Engine() EngineMetrics { - return m.engine -} - -func (m *NodeMetrics) State() StateMetrics { - return m.state -} - -func (m *NodeMetrics) FSTree() FSTreeMetrics { - return m.fstree -} - -func (m *NodeMetrics) Blobstore() BlobstoreMetrics { - return m.blobstore -} - -func (m *NodeMetrics) BlobobvnizcaTreeMetrics() BlobobvnizcaMetrics { - return m.blobobvnizca -} - -func (m *NodeMetrics) MetabaseMetrics() MetabaseMetrics { - return m.metabase -} - -func (m *NodeMetrics) PiloramaMetrics() PiloramaMetrics { - return m.pilorama -} - -func (m *NodeMetrics) GrpcServerMetrics() GrpcServerMetrics { - return m.grpc -} - -func (m *NodeMetrics) PolicerMetrics() PolicerMetrics { - return m.policer -} - -func (m *NodeMetrics) MorphClientMetrics() morphmetrics.Register { - return m.morphClient -} - -func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics { - return m.morphCache -} - -func (m *NodeMetrics) LogMetrics() logger.LogMetrics { - return m.log -} - -func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { - return m.multinet -} - -func (m *NodeMetrics) QoSMetrics() *QoSMetrics { - return m.qos -} diff --git a/internal/metrics/object.go b/internal/metrics/object.go deleted file mode 100644 index e4f6dfde1..000000000 --- a/internal/metrics/object.go +++ /dev/null @@ -1,60 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool, ioTag string) - AddPayloadSize(method string, size int) -} - -type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec - ioTagOpsCounter *prometheus.CounterVec -} - -func newObjectServiceMetrics() *objectServiceMetrics { - return &objectServiceMetrics{ - methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: objectSubsystem, - Name: "request_duration_seconds", - Help: "Object Service request process duration", - }, []string{methodLabel, successLabel}), - payloadCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: objectSubsystem, - Name: "request_payload_bytes", - Help: "Object Service request payload", - }, []string{methodLabel}), - ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: objectSubsystem, - Name: "requests_total", - Help: "Count of requests for each IO tag", - }, []string{methodLabel, ioTagLabel}), - } -} - -func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { - m.methodDuration.With(prometheus.Labels{ - methodLabel: method, - successLabel: strconv.FormatBool(success), - }).Observe(d.Seconds()) - m.ioTagOpsCounter.With(prometheus.Labels{ - 
ioTagLabel: ioTag, - methodLabel: method, - }).Inc() -} - -func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { - m.payloadCounter.With(prometheus.Labels{ - methodLabel: method, - }).Add(float64(size)) -} diff --git a/internal/metrics/pilorama.go b/internal/metrics/pilorama.go deleted file mode 100644 index c669275fe..000000000 --- a/internal/metrics/pilorama.go +++ /dev/null @@ -1,53 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type PiloramaMetrics interface { - SetMode(shardID string, m mode.ComponentMode) - Close(shardID string) - - AddMethodDuration(shardID string, method string, d time.Duration, success bool) -} - -func newPiloramaMetrics() *piloramaMetrics { - return &piloramaMetrics{ - mode: newShardIDMode(piloramaSubSystem, "mode", "Pilorama mode"), - reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: piloramaSubSystem, - Name: "request_duration_seconds", - Help: "Accumulated Pilorama request process duration", - }, []string{shardIDLabel, successLabel, methodLabel}), - } -} - -type piloramaMetrics struct { - mode *shardIDModeValue - reqDuration *prometheus.HistogramVec -} - -func (m *piloramaMetrics) SetMode(shardID string, mode mode.ComponentMode) { - m.mode.SetMode(shardID, mode.String()) -} - -func (m *piloramaMetrics) AddMethodDuration(shardID string, method string, d time.Duration, success bool) { - m.reqDuration.With(prometheus.Labels{ - shardIDLabel: shardID, - successLabel: strconv.FormatBool(success), - methodLabel: method, - }).Observe(d.Seconds()) -} - -func (m *piloramaMetrics) Close(shardID string) { - m.mode.SetMode(shardID, closedMode) - m.reqDuration.DeletePartialMatch(prometheus.Labels{ - shardIDLabel: shardID, - }) -} diff --git a/internal/metrics/policer.go b/internal/metrics/policer.go deleted file mode 100644 index e4bdc944e..000000000 --- a/internal/metrics/policer.go +++ /dev/null @@ -1,29 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type PolicerMetrics interface { - IncProcessedObjects() -} - -type policerMetrics struct { - processedObjectsCounter prometheus.Counter -} - -func newPolicerMetrics() *policerMetrics { - return &policerMetrics{ - processedObjectsCounter: metrics.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: policerSubsystem, - Name: "processed_objects_total", - Help: "Total number of objects processed by policer", - }), - } -} - -func (m *policerMetrics) IncProcessedObjects() { - m.processedObjectsCounter.Inc() -} diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go deleted file mode 100644 index be6878142..000000000 --- a/internal/metrics/qos.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type QoSMetrics struct { - opsCounter *prometheus.GaugeVec -} - -func newQoSMetrics() *QoSMetrics { - return &QoSMetrics{ - opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: qosSubsystem, - Name: "operations_total", - Help: "Count of pending, in-progress, completed, and resource-exhausted operations for each shard", - },
[]string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), - } -} - -func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) { - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "pending", - }).Set(float64(pending)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "in_progress", - }).Set(float64(inProgress)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "completed", - }).Set(float64(completed)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "resource_exhausted", - }).Set(float64(resourceExhausted)) -} - -func (m *QoSMetrics) Close(shardID string) { - m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) -} diff --git a/internal/metrics/replicator.go b/internal/metrics/replicator.go deleted file mode 100644 index ca72a3031..000000000 --- a/internal/metrics/replicator.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type ReplicatorMetrics interface { - IncInFlightRequest() - DecInFlightRequest() - IncProcessedObjects() - AddPayloadSize(size int64) -} - -type replicatorMetrics struct { - inFlightRequests prometheus.Gauge - processedObjects prometheus.Counter - totalReplicatedPayloadSize prometheus.Counter -} - -func (m *replicatorMetrics) IncInFlightRequest() { - m.inFlightRequests.Inc() -} - -func (m *replicatorMetrics) DecInFlightRequest() { - m.inFlightRequests.Dec() -} - -func (m *replicatorMetrics) IncProcessedObjects() { - m.processedObjects.Inc() -} - -func (m *replicatorMetrics) AddPayloadSize(size int64) { - m.totalReplicatedPayloadSize.Add(float64(size)) -} - -func newReplicatorMetrics() *replicatorMetrics { - return &replicatorMetrics{ - inFlightRequests: newReplicatorGauge("in_flight_requests_total", "Number of in-flight requests"), - processedObjects: newReplicatorCounter("processed_objects_total", "Number of objects processed since the node startup"), - totalReplicatedPayloadSize: newReplicatorCounter("total_replicated_payload_size_bytes", "Total size of payloads replicated"), - } -} - -func newReplicatorCounter(name, help string) prometheus.Counter { - return metrics.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: replicatorSubsystem, - Name: name, - Help: help, - }) -} - -func newReplicatorGauge(name, help string) prometheus.Gauge { - return metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: replicatorSubsystem, - Name: name, - Help: help, - }) -} diff --git a/internal/metrics/state.go b/internal/metrics/state.go deleted file mode 100644 index 243f648e5..000000000 --- a/internal/metrics/state.go +++ /dev/null @@ -1,29 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type StateMetrics interface { - SetHealth(s int32) -} - -type stateMetrics struct { - healthCheck prometheus.Gauge -} - -func newStateMetrics() *stateMetrics { - return &stateMetrics{ - healthCheck: metrics.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: stateSubsystem, - Name: "health", - Help: "Current 
Node state", - }), - } -} - -func (m *stateMetrics) SetHealth(s int32) { - m.healthCheck.Set(float64(s)) -} diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go deleted file mode 100644 index e192c4398..000000000 --- a/internal/metrics/treeservice.go +++ /dev/null @@ -1,79 +0,0 @@ -package metrics - -import ( - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type TreeMetricsRegister interface { - AddReplicateTaskDuration(time.Duration, bool) - AddReplicateWaitDuration(time.Duration, bool) - AddSyncDuration(time.Duration, bool) - AddOperation(string, string) -} - -type treeServiceMetrics struct { - replicateTaskDuration *prometheus.HistogramVec - replicateWaitDuration *prometheus.HistogramVec - syncOpDuration *prometheus.HistogramVec - ioTagOpsCounter *prometheus.CounterVec -} - -var _ TreeMetricsRegister = (*treeServiceMetrics)(nil) - -func newTreeServiceMetrics() *treeServiceMetrics { - return &treeServiceMetrics{ - replicateTaskDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: treeServiceSubsystem, - Name: "replicate_task_duration_seconds", - Help: "Duration of individual replication tasks executed as part of replication loops", - }, []string{successLabel}), - replicateWaitDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: treeServiceSubsystem, - Name: "replicate_wait_duration_seconds", - Help: "Duration of overall waiting time for replication loops", - }, []string{successLabel}), - syncOpDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: treeServiceSubsystem, - Name: "sync_duration_seconds", - Help: "Duration of synchronization operations", - }, []string{successLabel}), - ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: treeServiceSubsystem, - Name: "requests_total", - Help: "Count of requests for each IO tag", - }, []string{methodLabel, ioTagLabel}), - } -} - -func (m *treeServiceMetrics) AddReplicateTaskDuration(d time.Duration, success bool) { - m.replicateTaskDuration.With(prometheus.Labels{ - successLabel: strconv.FormatBool(success), - }).Observe(d.Seconds()) -} - -func (m *treeServiceMetrics) AddReplicateWaitDuration(d time.Duration, success bool) { - m.replicateWaitDuration.With(prometheus.Labels{ - successLabel: strconv.FormatBool(success), - }).Observe(d.Seconds()) -} - -func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) { - m.syncOpDuration.With(prometheus.Labels{ - successLabel: strconv.FormatBool(success), - }).Observe(d.Seconds()) -} - -func (m *treeServiceMetrics) AddOperation(op string, ioTag string) { - m.ioTagOpsCounter.With(prometheus.Labels{ - ioTagLabel: ioTag, - methodLabel: op, - }).Inc() -} diff --git a/internal/metrics/types.go b/internal/metrics/types.go deleted file mode 100644 index 6a76248bf..000000000 --- a/internal/metrics/types.go +++ /dev/null @@ -1,17 +0,0 @@ -package metrics - -import ( - "strconv" -) - -type NullBool struct { - Bool bool - Valid bool // Valid is true if Bool is not NULL -} - -func (v NullBool) String() string { - if !v.Valid { - return "" - } - return strconv.FormatBool(v.Bool) -} diff --git a/internal/metrics/writecache.go b/internal/metrics/writecache.go deleted file mode 100644 index 1b708f710..000000000 --- a/internal/metrics/writecache.go +++ /dev/null @@ -1,108 +0,0 @@ -package metrics - -import ( - 
"strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type WriteCacheMetrics interface { - AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration) - SetActualCount(shardID, path, storageType string, count uint64) - SetEstimateSize(shardID, path, storageType string, size uint64) - SetMode(shardID, mode string) - IncOperationCounter(shardID, path, storageType, operation string, success NullBool) - Close(shardID, path string) -} - -type writeCacheMetrics struct { - methodDuration *prometheus.HistogramVec - operationCounter *prometheus.CounterVec - - actualCount *prometheus.GaugeVec - - estimatedSize *prometheus.GaugeVec - - mode *shardIDModeValue -} - -func newWriteCacheMetrics() *writeCacheMetrics { - return &writeCacheMetrics{ - methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: writeCacheSubsystem, - Name: "request_duration_seconds", - Help: "Writecache request process duration", - }, []string{shardIDLabel, successLabel, storageLabel, methodLabel, pathLabel}), - operationCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: writeCacheSubsystem, - Name: "operations_total", - Help: "The number of writecache operations processed", - }, []string{shardIDLabel, storageLabel, successLabel, operationLabel, pathLabel}), - actualCount: newWCGaugeVec("actual_objects_total", "Actual objects count in writecache", []string{shardIDLabel, storageLabel, pathLabel}), - estimatedSize: newWCGaugeVec("estimated_size_bytes", "Estimated writecache size", []string{shardIDLabel, storageLabel, pathLabel}), - mode: newShardIDMode(writeCacheSubsystem, "mode_info", "Writecache mode value"), - } -} - -func (m *writeCacheMetrics) AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration) { - m.methodDuration.With( - prometheus.Labels{ - shardIDLabel: shardID, - successLabel: strconv.FormatBool(success), - storageLabel: storageType, - methodLabel: method, - pathLabel: path, - }, - ).Observe(d.Seconds()) -} - -func (m *writeCacheMetrics) SetActualCount(shardID, path, storageType string, count uint64) { - m.actualCount.With(prometheus.Labels{ - shardIDLabel: shardID, - storageLabel: storageType, - pathLabel: path, - }).Set(float64(count)) -} - -func (m *writeCacheMetrics) SetEstimateSize(shardID, path, storageType string, size uint64) { - m.estimatedSize.With(prometheus.Labels{ - shardIDLabel: shardID, - storageLabel: storageType, - pathLabel: path, - }).Set(float64(size)) -} - -func (m *writeCacheMetrics) SetMode(shardID string, mode string) { - m.mode.SetMode(shardID, mode) -} - -func (m *writeCacheMetrics) IncOperationCounter(shardID, path, storageType, operation string, success NullBool) { - m.operationCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - storageLabel: storageType, - operationLabel: operation, - successLabel: success.String(), - pathLabel: path, - }).Inc() -} - -func (m *writeCacheMetrics) Close(shardID, path string) { - m.mode.Delete(shardID) - m.methodDuration.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path}) - m.operationCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path}) - m.actualCount.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path}) - m.estimatedSize.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path}) -} - -func newWCGaugeVec(name, 
help string, labels []string) *prometheus.GaugeVec { - return metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: writeCacheSubsystem, - Name: name, - Help: help, - }, labels) -} diff --git a/internal/net/config.go b/internal/net/config.go deleted file mode 100644 index b84ac3b35..000000000 --- a/internal/net/config.go +++ /dev/null @@ -1,69 +0,0 @@ -package net - -import ( - "errors" - "fmt" - "net/netip" - "slices" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/multinet" -) - -var errEmptySourceIPList = errors.New("empty source IP list") - -type Subnet struct { - Prefix string - SourceIPs []string -} - -type Config struct { - Enabled bool - Subnets []Subnet - Balancer string - Restrict bool - FallbackDelay time.Duration - Metrics metrics.MultinetMetrics -} - -func (c Config) toMultinetConfig() (multinet.Config, error) { - var subnets []multinet.Subnet - for _, s := range c.Subnets { - var ms multinet.Subnet - p, err := netip.ParsePrefix(s.Prefix) - if err != nil { - return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err) - } - ms.Prefix = p - for _, ip := range s.SourceIPs { - addr, err := netip.ParseAddr(ip) - if err != nil { - return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err) - } - ms.SourceIPs = append(ms.SourceIPs, addr) - } - if len(ms.SourceIPs) == 0 { - return multinet.Config{}, errEmptySourceIPList - } - subnets = append(subnets, ms) - } - return multinet.Config{ - Subnets: subnets, - Balancer: multinet.BalancerType(c.Balancer), - Restrict: c.Restrict, - FallbackDelay: c.FallbackDelay, - Dialer: newDefaulDialer(), - EventHandler: newEventHandler(c.Metrics), - }, nil -} - -func (c Config) equals(other Config) bool { - return c.Enabled == other.Enabled && - slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool { - return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs) - }) && - c.Balancer == other.Balancer && - c.Restrict == other.Restrict && - c.FallbackDelay == other.FallbackDelay -} diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go deleted file mode 100644 index 6265f1860..000000000 --- a/internal/net/dial_target.go +++ /dev/null @@ -1,54 +0,0 @@ -// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go - -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package net - -import ( - "net/url" - "strings" -) - -// parseDialTarget returns the network and address to pass to dialer. 
-func parseDialTarget(target string) (string, string) { - net := "tcp" - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - return n, target[m1+1:] - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr := t.Path - if scheme == "unix" { - if addr == "" { - addr = t.Host - } - return scheme, addr - } - } - return net, target -} diff --git a/internal/net/dialer.go b/internal/net/dialer.go deleted file mode 100644 index daf0f815f..000000000 --- a/internal/net/dialer.go +++ /dev/null @@ -1,39 +0,0 @@ -package net - -import ( - "context" - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -type Dialer interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) { - return d.DialContext(ctx, "tcp", address) -} - -func newDefaulDialer() net.Dialer { - // From `grpc.WithContextDialer` comment: - // - // Note: All supported releases of Go (as of December 2023) override the OS - // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive - // with OS defaults for keepalive time and interval, use a net.Dialer that sets - // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket - // option to true from the Control field. For a concrete example of how to do - // this, see internal.NetDialerWithTCPKeepalive(). - // - // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432 - return net.Dialer{ - KeepAlive: time.Duration(-1), - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go deleted file mode 100644 index 3d94dedc7..000000000 --- a/internal/net/dialer_source.go +++ /dev/null @@ -1,83 +0,0 @@ -package net - -import ( - "context" - "net" - "sync" - - "git.frostfs.info/TrueCloudLab/multinet" -) - -type DialerSource struct { - guard sync.RWMutex - - c Config - - md multinet.Dialer -} - -func NewDialerSource(c Config) (*DialerSource, error) { - result := &DialerSource{} - if err := result.build(c); err != nil { - return nil, err - } - return result, nil -} - -func (s *DialerSource) build(c Config) error { - if c.Enabled { - mc, err := c.toMultinetConfig() - if err != nil { - return err - } - md, err := multinet.NewDialer(mc) - if err != nil { - return err - } - s.md = md - s.c = c - return nil - } - s.md = nil - s.c = c - return nil -} - -// GrpcContextDialer returns grpc.WithContextDialer func. -// Returns nil if multinet disabled. -func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) { - s.guard.RLock() - defer s.guard.RUnlock() - - if s.c.Enabled { - return func(ctx context.Context, address string) (net.Conn, error) { - network, address := parseDialTarget(address) - return s.md.DialContext(ctx, network, address) - } - } - return nil -} - -// NetContextDialer returns net.DialContext dial function. -// Returns nil if multinet disabled. 
-func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { - s.guard.RLock() - defer s.guard.RUnlock() - - if s.c.Enabled { - return func(ctx context.Context, network, address string) (net.Conn, error) { - return s.md.DialContext(ctx, network, address) - } - } - return nil -} - -func (s *DialerSource) Update(c Config) error { - s.guard.Lock() - defer s.guard.Unlock() - - if s.c.equals(c) { - return nil - } - return s.build(c) -} diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go deleted file mode 100644 index 024e5cf7c..000000000 --- a/internal/net/event_handler.go +++ /dev/null @@ -1,29 +0,0 @@ -package net - -import ( - "net" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/multinet" -) - -var _ multinet.EventHandler = (*metricsEventHandler)(nil) - -type metricsEventHandler struct { - m metrics.MultinetMetrics -} - -func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) { - sourceIPString := "undefined" - if sourceIP != nil { - sourceIPString = sourceIP.Network() + "://" + sourceIP.String() - } - m.m.Dial(sourceIPString, err == nil) -} - -func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler { - if m == nil { - return nil - } - return &metricsEventHandler{m: m} -} diff --git a/internal/qos/config.go b/internal/qos/config.go deleted file mode 100644 index d90b403b5..000000000 --- a/internal/qos/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package qos - -import ( - "math" - "time" -) - -const ( - NoLimit int64 = math.MaxInt64 - DefaultIdleTimeout = 5 * time.Minute -) - -type LimiterConfig struct { - Read OpConfig - Write OpConfig -} - -type OpConfig struct { - MaxWaitingOps int64 - MaxRunningOps int64 - IdleTimeout time.Duration - Tags []IOTagConfig -} - -type IOTagConfig struct { - Tag string - Weight *float64 - LimitOps *float64 - ReservedOps *float64 - Prohibited bool -} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go deleted file mode 100644 index 58cd9e52c..000000000 --- a/internal/qos/grpc.go +++ /dev/null @@ -1,86 +0,0 @@ -package qos - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "google.golang.org/grpc" -) - -func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) - return handler(ctx, req) - } -} - -func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - rawTag, ok := tagging.IOTagFromContext(ctx) - if !ok { - return invoker(ctx, method, req, reply, cc, opts...) - } - tag, err := FromRawString(rawTag) - if err != nil { - tag = IOTagClient - } - if tag.IsLocal() { - tag = IOTagInternal - } - ctx = tagging.ContextWithIOTag(ctx, tag.String()) - return invoker(ctx, method, req, reply, cc, opts...) 
- } -} - -func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - rawTag, ok := tagging.IOTagFromContext(ctx) - if !ok { - return streamer(ctx, desc, cc, method, opts...) - } - tag, err := FromRawString(rawTag) - if err != nil { - tag = IOTagClient - } - if tag.IsLocal() { - tag = IOTagInternal - } - ctx = tagging.ContextWithIOTag(ctx, tag.String()) - return streamer(ctx, desc, cc, method, opts...) - } -} - -func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { - return handler(ctx, req) - } - - release, ok := getLimiter().Acquire(info.FullMethod) - if !ok { - return nil, new(apistatus.ResourceExhausted) - } - defer release() - - return handler(ctx, req) - } -} - -//nolint:contextcheck (grpc.ServerStream manages the context itself) -func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { - return handler(srv, ss) - } - - release, ok := getLimiter().Acquire(info.FullMethod) - if !ok { - return new(apistatus.ResourceExhausted) - } - defer release() - - return handler(srv, ss) - } -} diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go deleted file mode 100644 index 7d0826754..000000000 --- a/internal/qos/grpc_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package qos_test - -import ( - "context" - "errors" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -const ( - okKey = "ok" -) - -var ( - errTest = errors.New("mock") - errWrongTag = errors.New("wrong tag") - errNoTag = errors.New("failed to get tag from context") - errResExhausted *apistatus.ResourceExhausted - tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} -) - -type mockGRPCServerStream struct { - grpc.ServerStream - - ctx context.Context -} - -func (m *mockGRPCServerStream) Context() context.Context { - return m.ctx -} - -type limiter struct { - acquired bool - released bool -} - -func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { - l.acquired = true - if key != okKey { - return nil, false - } - return func() { l.released = true }, true -} - -func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) - handler := func(ctx context.Context, req any) (any, error) { - return nil, errTest - } - _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) - return err -} - -func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := 
qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) - handler := func(srv any, stream grpc.ServerStream) error { - return errTest - } - err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ - FullMethod: methodName, - }, handler) - return err -} - -func Test_MaxActiveRPCLimiter(t *testing.T) { - // UnaryServerInterceptor - t.Run("unary fail", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := unaryMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) - require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) - // StreamServerInterceptor - t.Run("stream fail", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := streamMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) - require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) -} - -func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { - interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() - called := false - handler := func(ctx context.Context, req any) (any, error) { - called = true - if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { - return nil, nil - } - return nil, errWrongTag - } - _, err := interceptor(context.Background(), nil, nil, handler) - require.NoError(t, err) - require.True(t, called) -} - -func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() - - // check context with no value - called := false - invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil - } - require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) - require.True(t, called) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { - return errNoTag - } - if raw != targetTag { - return errWrongTag - } - return nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - require.NoError(t, 
interceptor(ctx, "", nil, nil, nil, invoker, nil)) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") - targetTag = qos.IOTagClient.String() - require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) -} - -func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() - - // check context with no value - called := false - streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil, nil - } - _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) - require.True(t, called) - require.NoError(t, err) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { - return nil, errNoTag - } - if raw != targetTag { - return nil, errWrongTag - } - return nil, nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - _, err := interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") - targetTag = qos.IOTagClient.String() - _, err = interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) -} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go deleted file mode 100644 index 2d7de32fc..000000000 --- a/internal/qos/limiter.go +++ /dev/null @@ -1,246 +0,0 @@ -package qos - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -const ( - defaultIdleTimeout time.Duration = 0 - defaultShare float64 = 1.0 - minusOne = ^uint64(0) - - defaultMetricsCollectTimeout = 5 * time.Second -) - -type ReleaseFunc scheduling.ReleaseFunc - -type Limiter interface { - ReadRequest(context.Context) (ReleaseFunc, error) - WriteRequest(context.Context) (ReleaseFunc, error) - SetParentID(string) - SetMetrics(Metrics) - Close() -} - -type scheduler interface { - RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) - Close() -} - -func NewLimiter(c LimiterConfig) (Limiter, error) { - if err := c.Validate(); err != nil { - return nil, err - } - readScheduler, err := createScheduler(c.Read) - if err != nil { - return nil, fmt.Errorf("create read scheduler: %w", err) - } - writeScheduler, err := createScheduler(c.Write) - if err != nil { - return nil, fmt.Errorf("create write scheduler: %w", err) - } - l := &mClockLimiter{ - readScheduler: readScheduler, - writeScheduler: writeScheduler, - closeCh: make(chan struct{}), - wg: &sync.WaitGroup{}, - readStats: createStats(), - writeStats: createStats(), - } - l.shardID.Store(&shardID{}) - l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) - l.startMetricsCollect() - return l, nil -} - -func createScheduler(config OpConfig) (scheduler, error) { - if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { - return 
newSemaphoreScheduler(config.MaxRunningOps), nil - } - return scheduling.NewMClock( - uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps), - converToSchedulingTags(config.Tags), config.IdleTimeout) -} - -func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { - result := make(map[string]scheduling.TagInfo) - for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { - result[tag.String()] = scheduling.TagInfo{ - Share: defaultShare, - } - } - for _, l := range limits { - v := result[l.Tag] - if l.Weight != nil && *l.Weight != 0 { - v.Share = *l.Weight - } - if l.LimitOps != nil && *l.LimitOps != 0 { - v.LimitIOPS = l.LimitOps - } - if l.ReservedOps != nil && *l.ReservedOps != 0 { - v.ReservedIOPS = l.ReservedOps - } - v.Prohibited = l.Prohibited - result[l.Tag] = v - } - return result -} - -var ( - _ Limiter = (*noopLimiter)(nil) - releaseStub ReleaseFunc = func() {} - noopLimiterInstance = &noopLimiter{} -) - -func NewNoopLimiter() Limiter { - return noopLimiterInstance -} - -type noopLimiter struct{} - -func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { - return releaseStub, nil -} - -func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { - return releaseStub, nil -} - -func (n *noopLimiter) SetParentID(string) {} - -func (n *noopLimiter) Close() {} - -func (n *noopLimiter) SetMetrics(Metrics) {} - -var _ Limiter = (*mClockLimiter)(nil) - -type shardID struct { - id string -} - -type mClockLimiter struct { - readScheduler scheduler - writeScheduler scheduler - - readStats map[string]*stat - writeStats map[string]*stat - - shardID atomic.Pointer[shardID] - metrics atomic.Pointer[metricsHolder] - closeCh chan struct{} - wg *sync.WaitGroup -} - -func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.readScheduler, n.readStats) -} - -func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.writeScheduler, n.writeStats) -} - -func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - tag, ok := tagging.IOTagFromContext(ctx) - if !ok { - tag = IOTagClient.String() - } - stat := getStat(tag, stats) - stat.pending.Add(1) - if tag == IOTagCritical.String() { - stat.inProgress.Add(1) - return func() { - stat.completed.Add(1) - }, nil - } - rel, err := s.RequestArrival(ctx, tag) - stat.inProgress.Add(1) - if err != nil { - if isResourceExhaustedErr(err) { - stat.resourceExhausted.Add(1) - return nil, &apistatus.ResourceExhausted{} - } - stat.completed.Add(1) - return nil, err - } - return func() { - rel() - stat.completed.Add(1) - }, nil -} - -func (n *mClockLimiter) Close() { - n.readScheduler.Close() - n.writeScheduler.Close() - close(n.closeCh) - n.wg.Wait() - n.metrics.Load().metrics.Close(n.shardID.Load().id) -} - -func (n *mClockLimiter) SetParentID(parentID string) { - n.shardID.Store(&shardID{id: parentID}) -} - -func (n *mClockLimiter) SetMetrics(m Metrics) { - n.metrics.Store(&metricsHolder{metrics: m}) -} - -func (n *mClockLimiter) startMetricsCollect() { - n.wg.Add(1) - go func() { - defer n.wg.Done() - - ticker := time.NewTicker(defaultMetricsCollectTimeout) - defer ticker.Stop() - for { - select { - case <-n.closeCh: - return - case <-ticker.C: - shardID := n.shardID.Load().id - if shardID == "" { - continue - } - metrics := 
n.metrics.Load().metrics - exportMetrics(metrics, n.readStats, shardID, "read") - exportMetrics(metrics, n.writeStats, shardID, "write") - } - } - }() -} - -func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { - var pending uint64 - var inProgress uint64 - var completed uint64 - var resExh uint64 - for tag, s := range stats { - pending = s.pending.Load() - inProgress = s.inProgress.Load() - completed = s.completed.Load() - resExh = s.resourceExhausted.Load() - if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { - continue - } - metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) - } -} - -func isResourceExhaustedErr(err error) bool { - return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || - errors.Is(err, errSemaphoreLimitExceeded) || - errors.Is(err, scheduling.ErrTagRequestsProhibited) -} diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go deleted file mode 100644 index c00da51b7..000000000 --- a/internal/qos/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -package qos - -import "sync/atomic" - -type Metrics interface { - SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) - Close(shardID string) -} - -var _ Metrics = (*noopMetrics)(nil) - -type noopMetrics struct{} - -func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { -} - -func (n *noopMetrics) Close(string) {} - -// stat presents limiter statistics cumulative counters. -// -// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`. -type stat struct { - completed atomic.Uint64 - pending atomic.Uint64 - resourceExhausted atomic.Uint64 - inProgress atomic.Uint64 -} - -type metricsHolder struct { - metrics Metrics -} diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go deleted file mode 100644 index 74e6928f3..000000000 --- a/internal/qos/semaphore.go +++ /dev/null @@ -1,39 +0,0 @@ -package qos - -import ( - "context" - "errors" - - qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" - "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" -) - -var ( - _ scheduler = (*semaphore)(nil) - errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") -) - -type semaphore struct { - s *qosSemaphore.Semaphore -} - -func newSemaphoreScheduler(size int64) *semaphore { - return &semaphore{ - s: qosSemaphore.NewSemaphore(size), - } -} - -func (s *semaphore) Close() {} - -func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - if s.s.Acquire() { - return s.s.Release, nil - } - return nil, errSemaphoreLimitExceeded -} diff --git a/internal/qos/stats.go b/internal/qos/stats.go deleted file mode 100644 index 3ecfad9f9..000000000 --- a/internal/qos/stats.go +++ /dev/null @@ -1,29 +0,0 @@ -package qos - -const unknownStatsTag = "unknown" - -var statTags = map[string]struct{}{ - IOTagBackground.String(): {}, - IOTagClient.String(): {}, - IOTagCritical.String(): {}, - IOTagInternal.String(): {}, - IOTagPolicer.String(): {}, - IOTagTreeSync.String(): {}, - IOTagWritecache.String(): {}, - unknownStatsTag: {}, -} - -func createStats() map[string]*stat { - result := make(map[string]*stat) - for tag := range statTags { - result[tag] = &stat{} - } - return result -} - -func getStat(tag string, stats 
map[string]*stat) *stat { - if v, ok := stats[tag]; ok { - return v - } - return stats[unknownStatsTag] -} diff --git a/internal/qos/tags.go b/internal/qos/tags.go deleted file mode 100644 index e3f7cafd6..000000000 --- a/internal/qos/tags.go +++ /dev/null @@ -1,59 +0,0 @@ -package qos - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" -) - -type IOTag string - -const ( - IOTagBackground IOTag = "background" - IOTagClient IOTag = "client" - IOTagCritical IOTag = "critical" - IOTagInternal IOTag = "internal" - IOTagPolicer IOTag = "policer" - IOTagTreeSync IOTag = "treesync" - IOTagWritecache IOTag = "writecache" - - ioTagUnknown IOTag = "" -) - -func FromRawString(s string) (IOTag, error) { - switch s { - case string(IOTagBackground): - return IOTagBackground, nil - case string(IOTagClient): - return IOTagClient, nil - case string(IOTagCritical): - return IOTagCritical, nil - case string(IOTagInternal): - return IOTagInternal, nil - case string(IOTagPolicer): - return IOTagPolicer, nil - case string(IOTagTreeSync): - return IOTagTreeSync, nil - case string(IOTagWritecache): - return IOTagWritecache, nil - default: - return ioTagUnknown, fmt.Errorf("unknown tag %s", s) - } -} - -func (t IOTag) String() string { - return string(t) -} - -func IOTagFromContext(ctx context.Context) string { - tag, ok := tagging.IOTagFromContext(ctx) - if !ok { - tag = "undefined" - } - return tag -} - -func (t IOTag) IsLocal() bool { - return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync -} diff --git a/internal/qos/validate.go b/internal/qos/validate.go deleted file mode 100644 index 70f1f24e8..000000000 --- a/internal/qos/validate.go +++ /dev/null @@ -1,91 +0,0 @@ -package qos - -import ( - "errors" - "fmt" - "math" -) - -var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") - -type tagConfig struct { - Shares, Limit, Reserved *float64 -} - -func (c *LimiterConfig) Validate() error { - if err := validateOpConfig(c.Read); err != nil { - return fmt.Errorf("limits 'read' section validation error: %w", err) - } - if err := validateOpConfig(c.Write); err != nil { - return fmt.Errorf("limits 'write' section validation error: %w", err) - } - return nil -} - -func validateOpConfig(c OpConfig) error { - if c.MaxRunningOps <= 0 { - return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) - } - if c.MaxWaitingOps <= 0 { - return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) - } - if c.IdleTimeout <= 0 { - return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) - } - if err := validateTags(c.Tags); err != nil { - return fmt.Errorf("'tags' config section validation error: %w", err) - } - return nil -} - -func validateTags(configTags []IOTagConfig) error { - tags := map[IOTag]tagConfig{ - IOTagBackground: {}, - IOTagClient: {}, - IOTagInternal: {}, - IOTagPolicer: {}, - IOTagTreeSync: {}, - IOTagWritecache: {}, - } - for _, t := range configTags { - tag, err := FromRawString(t.Tag) - if err != nil { - return fmt.Errorf("invalid tag %s: %w", t.Tag, err) - } - if _, ok := tags[tag]; !ok { - return fmt.Errorf("tag %s is not configurable", t.Tag) - } - tags[tag] = tagConfig{ - Shares: t.Weight, - Limit: t.LimitOps, - Reserved: t.ReservedOps, - } - } - idx := 0 - var shares float64 - for t, v := range tags { - if idx == 0 { - idx++ - shares = 
float64Value(v.Shares) - } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { - return errWeightsMustBeSpecified - } - if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { - return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) - } - if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { - return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) - } - if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { - return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) - } - } - return nil -} - -func float64Value(f *float64) float64 { - if f == nil { - return 0.0 - } - return *f -} diff --git a/misc/build.go b/misc/build.go deleted file mode 100644 index 52d9e13c2..000000000 --- a/misc/build.go +++ /dev/null @@ -1,21 +0,0 @@ -package misc - -import ( - "fmt" - "runtime" -) - -// These variables are changed in compile time. -var ( - // Version is an application version. - Version = "dev" -) - -// BuildInfo returns human-readable information about this binary. -func BuildInfo(component string) string { - return fmt.Sprintf("%s\nVersion: %s \nGoVersion: %s\n", - component, - Version, - runtime.Version(), - ) -} diff --git a/pkg/ape/chainbase/boltdb.go b/pkg/ape/chainbase/boltdb.go deleted file mode 100644 index 005b3bd84..000000000 --- a/pkg/ape/chainbase/boltdb.go +++ /dev/null @@ -1,329 +0,0 @@ -package chainbase - -import ( - "bytes" - "context" - "errors" - "fmt" - "path/filepath" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "go.etcd.io/bbolt" -) - -type boltLocalOverrideStorage struct { - *cfg - - db *bbolt.DB -} - -var chainBucket = []byte{0} - -var ( - // ErrRootBucketNotFound signals the database has not been properly initialized. - ErrRootBucketNotFound = logicerr.New("root bucket not found") - - ErrGlobalNamespaceBucketNotFound = logicerr.New("global namespace bucket not found") - - ErrTargetTypeBucketNotFound = logicerr.New("target type bucket not found") - - ErrTargetNameBucketNotFound = logicerr.New("target name bucket not found") - - ErrBucketNotContainsChainID = logicerr.New("chain id not found in bucket") - - errChainIDIsNotSet = errors.New("chain ID is not set") -) - -// NewBoltLocalOverrideDatabase returns storage wrapper for storing access policy engine -// local overrides. 
-// -// chain storage (chainBucket): -// -> global namespace bucket (nBucket): -// --> target bucket (tBucket) -// ---> target name (resource) bucket (rBucket): -// -// | Key | Value | -// x---------------------x-------------------x -// | chain id (string) | serialized chain | -// x---------------------x-------------------x -// -//nolint:godot -func NewBoltLocalOverrideDatabase(opts ...Option) LocalOverrideDatabase { - c := defaultCfg() - - for i := range opts { - opts[i](c) - } - - return &boltLocalOverrideStorage{ - cfg: c, - } -} - -func (cs *boltLocalOverrideStorage) Init() error { - return cs.db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(chainBucket) - return err - }) -} - -func (cs *boltLocalOverrideStorage) Open(context.Context) error { - err := util.MkdirAllX(filepath.Dir(cs.path), cs.perm) - if err != nil { - return fmt.Errorf("can't create dir %s for the chain DB: %w", cs.path, err) - } - - opts := *bbolt.DefaultOptions - opts.NoSync = cs.noSync - opts.Timeout = 100 * time.Millisecond - - cs.db, err = bbolt.Open(cs.path, cs.perm, &opts) - if err != nil { - return fmt.Errorf("can't open the chain DB: %w", err) - } - - cs.db.MaxBatchSize = cs.maxBatchSize - cs.db.MaxBatchDelay = cs.maxBatchDelay - - return nil -} - -func (cs *boltLocalOverrideStorage) Close() error { - var err error - if cs.db != nil { - err = cs.db.Close() - } - return err -} - -func getTypeBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) { - cbucket := tx.Bucket(chainBucket) - if cbucket == nil { - return nil, ErrRootBucketNotFound - } - - nbucket := cbucket.Bucket([]byte(name)) - if nbucket == nil { - return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name) - } - return nbucket.Bucket([]byte{byte(target.Type)}), nil -} - -func normalizeTargetName(target *policyengine.Target) { - if target.Type == policyengine.Namespace && target.Name == "" { - target.Name = "root" - } -} - -func getTargetBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) { - typeBucket, err := getTypeBucket(tx, name, target) - if err != nil { - return nil, err - } - if typeBucket == nil { - return nil, fmt.Errorf("%w: %w: %c", policyengine.ErrChainNotFound, ErrTargetTypeBucketNotFound, target.Type) - } - - normalizeTargetName(&target) - rbucket := typeBucket.Bucket([]byte(target.Name)) - if rbucket == nil { - return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrTargetNameBucketNotFound, target.Name) - } - return rbucket, nil -} - -func getTargetBucketCreateIfEmpty(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) { - cbucket := tx.Bucket(chainBucket) - if cbucket == nil { - return nil, ErrRootBucketNotFound - } - - nbucket := cbucket.Bucket([]byte(name)) - if nbucket == nil { - var err error - nbucket, err = cbucket.CreateBucket([]byte(name)) - if err != nil { - return nil, fmt.Errorf("could not create a bucket for the global chain name %s: %w", name, err) - } - } - - typeBucket := nbucket.Bucket([]byte{byte(target.Type)}) - if typeBucket == nil { - var err error - typeBucket, err = nbucket.CreateBucket([]byte{byte(target.Type)}) - if err != nil { - return nil, fmt.Errorf("could not create a bucket for the target type '%c': %w", target.Type, err) - } - } - - normalizeTargetName(&target) - rbucket := typeBucket.Bucket([]byte(target.Name)) - if rbucket == nil { - var err error - rbucket, err = typeBucket.CreateBucket([]byte(target.Name)) - if 
err != nil { - return nil, fmt.Errorf("could not create a bucket for the target name %s: %w", target.Name, err) - } - } - - return rbucket, nil -} - -func (cs *boltLocalOverrideStorage) AddOverride(name chain.Name, target policyengine.Target, c *chain.Chain) (chain.ID, error) { - if len(c.ID) == 0 { - return chain.ID{}, errChainIDIsNotSet - } - - serializedChain := c.Bytes() - - err := cs.db.Update(func(tx *bbolt.Tx) error { - rbuck, err := getTargetBucketCreateIfEmpty(tx, name, target) - if err != nil { - return err - } - return rbuck.Put([]byte(c.ID), serializedChain) - }) - - return c.ID, err -} - -func (cs *boltLocalOverrideStorage) GetOverride(name chain.Name, target policyengine.Target, chainID chain.ID) (*chain.Chain, error) { - var serializedChain []byte - - if err := cs.db.View(func(tx *bbolt.Tx) error { - rbuck, err := getTargetBucket(tx, name, target) - if err != nil { - return err - } - serializedChain = rbuck.Get([]byte(chainID)) - if serializedChain == nil { - return fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrBucketNotContainsChainID, chainID) - } - serializedChain = bytes.Clone(serializedChain) - return nil - }); err != nil { - return nil, err - } - - c := &chain.Chain{} - if err := c.DecodeBytes(serializedChain); err != nil { - return nil, err - } - return c, nil -} - -func (cs *boltLocalOverrideStorage) RemoveOverride(name chain.Name, target policyengine.Target, chainID chain.ID) error { - return cs.db.Update(func(tx *bbolt.Tx) error { - rbuck, err := getTargetBucket(tx, name, target) - if err != nil { - return err - } - return rbuck.Delete([]byte(chainID)) - }) -} - -func (cs *boltLocalOverrideStorage) RemoveOverridesByTarget(name chain.Name, target policyengine.Target) error { - return cs.db.Update(func(tx *bbolt.Tx) error { - typeBucket, err := getTypeBucket(tx, name, target) - if err != nil { - return err - } - normalizeTargetName(&target) - return typeBucket.DeleteBucket([]byte(target.Name)) - }) -} - -func (cs *boltLocalOverrideStorage) ListOverrides(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) { - var serializedChains [][]byte - var serializedChain []byte - if err := cs.db.View(func(tx *bbolt.Tx) error { - rbuck, err := getTargetBucket(tx, name, target) - if err != nil { - return err - } - return rbuck.ForEach(func(_, v []byte) error { - serializedChain = bytes.Clone(v) - serializedChains = append(serializedChains, serializedChain) - return nil - }) - }); err != nil { - if errors.Is(err, policyengine.ErrChainNotFound) { - return []*chain.Chain{}, nil - } - return nil, err - } - chains := make([]*chain.Chain, 0, len(serializedChains)) - for _, serializedChain = range serializedChains { - c := &chain.Chain{} - if err := c.DecodeBytes(serializedChain); err != nil { - return nil, err - } - chains = append(chains, c) - } - return chains, nil -} - -func (cs *boltLocalOverrideStorage) DropAllOverrides(name chain.Name) error { - return cs.db.Update(func(tx *bbolt.Tx) error { - cbucket := tx.Bucket(chainBucket) - if cbucket == nil { - return ErrRootBucketNotFound - } - - nbucket := cbucket.Bucket([]byte(name)) - if nbucket == nil { - return fmt.Errorf("%w: %w: global namespace %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name) - } - - return tx.DeleteBucket([]byte(name)) - }) -} - -func (cs *boltLocalOverrideStorage) ListOverrideDefinedTargets(name chain.Name) ([]policyengine.Target, error) { - var targets []policyengine.Target - if err := cs.db.View(func(tx *bbolt.Tx) error { - var err error - targets, err = 
getTargets(tx, name) - if err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - return targets, nil -} - -func getTargets(tx *bbolt.Tx, name chain.Name) ([]policyengine.Target, error) { - var targets []policyengine.Target - cbucket := tx.Bucket(chainBucket) - if cbucket == nil { - return nil, ErrRootBucketNotFound - } - - nbucket := cbucket.Bucket([]byte(name)) - if nbucket == nil { - return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name) - } - - if err := nbucket.ForEachBucket(func(k []byte) error { - ttype := policyengine.TargetType(k[0]) - if err := nbucket.Bucket(k).ForEachBucket(func(k []byte) error { - targets = append(targets, policyengine.Target{ - Type: ttype, - Name: string(bytes.Clone(k)), - }) - return nil - }); err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - return targets, nil -} diff --git a/pkg/ape/chainbase/inmemory.go b/pkg/ape/chainbase/inmemory.go deleted file mode 100644 index 27712d959..000000000 --- a/pkg/ape/chainbase/inmemory.go +++ /dev/null @@ -1,30 +0,0 @@ -package chainbase - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" -) - -type inmemoryLocalOverrideStorage struct { - engine.LocalOverrideStorage -} - -func NewInmemoryLocalOverrideDatabase() LocalOverrideDatabase { - return &inmemoryLocalOverrideStorage{ - LocalOverrideStorage: inmemory.NewInmemoryLocalStorage(), - } -} - -func (cs *inmemoryLocalOverrideStorage) Init() error { - return nil -} - -func (cs *inmemoryLocalOverrideStorage) Open(_ context.Context) error { - return nil -} - -func (cs *inmemoryLocalOverrideStorage) Close() error { - return nil -} diff --git a/pkg/ape/chainbase/interface.go b/pkg/ape/chainbase/interface.go deleted file mode 100644 index ee445f22c..000000000 --- a/pkg/ape/chainbase/interface.go +++ /dev/null @@ -1,22 +0,0 @@ -package chainbase - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" -) - -// DatabaseCore interface provides methods to initialize and manage local override storage -// as database. -type DatabaseCore interface { - Init() error - Open(context.Context) error - Close() error -} - -// LocalOverrideDatabase interface provides methods to manage local override storage -// as database and as the APE's local override storage. 
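For illustration, a minimal lifecycle sketch for such a database, assuming the bolt-backed implementation from boltdb.go above; the helper itself is hypothetical and the path value is a placeholder:

func exampleOverrideDatabaseLifecycle(ctx context.Context) error {
	// The bolt-backed store is configured via the functional options from
	// option.go below; the path here is an assumed placeholder.
	db := NewBoltLocalOverrideDatabase(WithPath("/var/lib/frostfs/ape/chains.db"))
	if err := db.Open(ctx); err != nil { // Open before Init: Init creates the root bucket
		return err
	}
	if err := db.Init(); err != nil {
		return err
	}
	defer db.Close()
	// From here db also serves as the APE engine's LocalOverrideStorage.
	return nil
}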
-type LocalOverrideDatabase interface { - DatabaseCore - engine.LocalOverrideStorage -} diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go deleted file mode 100644 index 590b7a885..000000000 --- a/pkg/ape/chainbase/option.go +++ /dev/null @@ -1,57 +0,0 @@ -package chainbase - -import ( - "io/fs" - "os" - "time" - - "go.etcd.io/bbolt" -) - -type Option func(*cfg) - -type cfg struct { - path string - perm fs.FileMode - noSync bool - maxBatchDelay time.Duration - maxBatchSize int -} - -func defaultCfg() *cfg { - return &cfg{ - perm: os.ModePerm, - maxBatchDelay: bbolt.DefaultMaxBatchDelay, - maxBatchSize: bbolt.DefaultMaxBatchSize, - } -} - -func WithPath(path string) Option { - return func(c *cfg) { - c.path = path - } -} - -func WithPerm(perm fs.FileMode) Option { - return func(c *cfg) { - c.perm = perm - } -} - -func WithNoSync(noSync bool) Option { - return func(c *cfg) { - c.noSync = noSync - } -} - -func WithMaxBatchDelay(maxBatchDelay time.Duration) Option { - return func(c *cfg) { - c.maxBatchDelay = maxBatchDelay - } -} - -func WithMaxBatchSize(maxBatchSize int) Option { - return func(c *cfg) { - c.maxBatchSize = maxBatchSize - } -} diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go deleted file mode 100644 index 8cbb1cce9..000000000 --- a/pkg/ape/contract_storage/proxy.go +++ /dev/null @@ -1,126 +0,0 @@ -package contractstorage - -import ( - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - policy_morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" -) - -type ProxyAdaptedContractStorage interface { - AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error) - - RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error) - - ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) -} - -var _ ProxyAdaptedContractStorage = (engine.MorphRuleChainStorage)(nil) - -type RPCActorProvider interface { - GetRPCActor() actor.RPCActor -} - -// ProxyVerificationContractStorage uses decorated MorphRuleChainStorage with actor where cosigner is a proxy contract. 
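For illustration, a minimal construction sketch, assuming the caller already holds the proxy and policy contract script hashes and a signing key; the wrapper function is hypothetical:

func exampleProxyStorage(provider RPCActorProvider, key *keys.PrivateKey, proxyHash, policyHash util.Uint160) ProxyAdaptedContractStorage {
	// The constructor below pairs two cosigners: the Proxy contract account,
	// scoped to the Policy contract only, and the key's own account.
	return NewProxyVerificationContractStorage(provider, key, proxyHash, policyHash)
}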
-type ProxyVerificationContractStorage struct { - rpcActorProvider RPCActorProvider - - cosigners []actor.SignerAccount - - policyScriptHash util.Uint160 -} - -var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) - -func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { - acc := wallet.NewAccountFromPrivateKey(key) - return &ProxyVerificationContractStorage{ - rpcActorProvider: rpcActorProvider, - - cosigners: []actor.SignerAccount{ - { - Signer: transaction.Signer{ - Account: proxyScriptHash, - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{policyScriptHash}, - }, - Account: notary.FakeContractAccount(proxyScriptHash), - }, - { - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }, - }, - - policyScriptHash: policyScriptHash, - } -} - -// contractStorageActorAdapter adapts *actor.Actor to the policy_morph.ContractStorageActor interface. -type contractStorageActorAdapter struct { - *actor.Actor - rpcActor invoker.RPCInvoke -} - -func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke { - return n.rpcActor -} - -func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { - rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - act, err := actor.New(rpcActor, contractStorage.cosigners) - if err != nil { - return nil, err - } - return &contractStorageActorAdapter{ - Actor: act, - rpcActor: rpcActor, - }, nil -} - -// AddMorphRuleChain adds a morph rule chain to the Policy contract using both the Proxy contract and the storage account as cosigners. -func (contractStorage *ProxyVerificationContractStorage) AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error) { - // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but - // ProxyVerificationContractStorage does not manage reconnections. - contractStorageActor, err := contractStorage.newContractStorageActor() - if err != nil { - return util.Uint256{}, 0, err - } - return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).AddMorphRuleChain(name, target, c) -} - -// RemoveMorphRuleChain removes a morph rule chain from the Policy contract using both the Proxy contract and the storage account as cosigners. -func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error) { - // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but - // ProxyVerificationContractStorage does not manage reconnections. - contractStorageActor, err := contractStorage.newContractStorageActor() - if err != nil { - return util.Uint256{}, 0, err - } - return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).RemoveMorphRuleChain(name, target, chainID) -} - -// ListMorphRuleChains lists morph rule chains from the Policy contract using both the Proxy contract and the storage account as cosigners. 
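For illustration, a usage sketch of the write and read pair above; the chain and target are assumed to be prepared by the caller, and the helper is hypothetical:

func exampleAddThenList(s ProxyAdaptedContractStorage, target engine.Target, c *chain.Chain) ([]*chain.Chain, error) {
	txHash, vub, err := s.AddMorphRuleChain(chain.Ingress, target, c)
	if err != nil {
		return nil, err
	}
	_, _ = txHash, vub // notary transaction hash and its validity height; awaiting the transaction is left to the caller
	return s.ListMorphRuleChains(chain.Ingress, target)
}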
-func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} - return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) -} - -type invokerAdapter struct { - *invoker.Invoker - rpcInvoker invoker.RPCInvoke -} - -func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { - return n.rpcInvoker -} diff --git a/pkg/ape/converter/converter.go b/pkg/ape/converter/converter.go deleted file mode 100644 index 9032680af..000000000 --- a/pkg/ape/converter/converter.go +++ /dev/null @@ -1,44 +0,0 @@ -package converter - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" -) - -func SchemaRoleFromACLRole(role acl.Role) (string, error) { - switch role { - case acl.RoleOwner: - return nativeschema.PropertyValueContainerRoleOwner, nil - case acl.RoleContainer: - return nativeschema.PropertyValueContainerRoleContainer, nil - case acl.RoleInnerRing: - return nativeschema.PropertyValueContainerRoleIR, nil - case acl.RoleOthers: - return nativeschema.PropertyValueContainerRoleOthers, nil - default: - return "", fmt.Errorf("failed to convert %s", role.String()) - } -} - -func SchemaMethodFromACLOperation(op acl.Op) (string, error) { - switch op { - case acl.OpObjectGet: - return nativeschema.MethodGetObject, nil - case acl.OpObjectHead: - return nativeschema.MethodHeadObject, nil - case acl.OpObjectPut: - return nativeschema.MethodPutObject, nil - case acl.OpObjectDelete: - return nativeschema.MethodDeleteObject, nil - case acl.OpObjectSearch: - return nativeschema.MethodSearchObject, nil - case acl.OpObjectRange: - return nativeschema.MethodRangeObject, nil - case acl.OpObjectHash: - return nativeschema.MethodHashObject, nil - default: - return "", fmt.Errorf("operation cannot be converted: %d", op) - } -} diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go deleted file mode 100644 index d32bd4a07..000000000 --- a/pkg/ape/request/frostfsid.go +++ /dev/null @@ -1,53 +0,0 @@ -package request - -import ( - "context" - "fmt" - "strconv" - "strings" - - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. 
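For illustration, a caller-side sketch of the helper below, assuming a frostfsid client and the sender's public key are at hand; the wrapper is hypothetical:

func exampleAPERequestProps(ctx context.Context, cli frostfsidcore.SubjectProvider, senderKey *keys.PublicKey) (map[string]string, error) {
	// A missing subject is not an error: the helper returns an empty map.
	// On success the map carries one property per user claim plus a group-ID
	// membership condition under commonschema.PropertyKeyFrostFSIDGroupID.
	return FormFrostfsIDRequestProperties(ctx, cli, senderKey)
}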
-func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { - reqProps := make(map[string]string) - subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) - if err != nil { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return nil, fmt.Errorf("get subject error: %w", err) - } - return reqProps, nil - } - for k, v := range subj.KV { - propertyKey := fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, k) - reqProps[propertyKey] = v - } - - groups := make([]string, len(subj.Groups)) - for i, group := range subj.Groups { - groups[i] = strconv.FormatInt(group.ID, 10) - } - reqProps[commonschema.PropertyKeyFrostFSIDGroupID] = apechain.FormCondSliceContainsValue(groups) - - return reqProps, nil -} - -// Groups return the actor's group ids from frostfsid contract. -func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { - subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) - if err != nil { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return nil, fmt.Errorf("get subject error: %w", err) - } - return []string{}, nil - } - groups := make([]string, len(subj.Groups)) - for i, group := range subj.Groups { - groups[i] = strconv.FormatInt(group.ID, 10) - } - return groups, nil -} diff --git a/pkg/ape/request/request.go b/pkg/ape/request/request.go deleted file mode 100644 index de67dea23..000000000 --- a/pkg/ape/request/request.go +++ /dev/null @@ -1,55 +0,0 @@ -package request - -import ( - aperesource "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource" -) - -type Request struct { - operation string - resource Resource - properties map[string]string -} - -func NewRequest(operation string, resource Resource, properties map[string]string) Request { - return Request{ - operation: operation, - resource: resource, - properties: properties, - } -} - -var _ aperesource.Request = Request{} - -func (r Request) Operation() string { - return r.operation -} - -func (r Request) Property(key string) string { - return r.properties[key] -} - -func (r Request) Resource() aperesource.Resource { - return r.resource -} - -type Resource struct { - name string - properties map[string]string -} - -var _ aperesource.Resource = Resource{} - -func NewResource(name string, properties map[string]string) Resource { - return Resource{ - name: name, - properties: properties, - } -} - -func (r Resource) Name() string { - return r.name -} - -func (r Resource) Property(key string) string { - return r.properties[key] -} diff --git a/pkg/ape/router/bearer_overrides.go b/pkg/ape/router/bearer_overrides.go deleted file mode 100644 index 2bc8ad614..000000000 --- a/pkg/ape/router/bearer_overrides.go +++ /dev/null @@ -1,94 +0,0 @@ -package router - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" -) - -func newTarget(ct ape.ChainTarget) (policyengine.Target, error) { - var target policyengine.Target - switch ct.TargetType { - case ape.TargetTypeContainer: - var cid cidSDK.ID - err := cid.DecodeString(ct.Name) - if err != nil { - return target, fmt.Errorf("invalid cid format: 
%s", ct.Name) - } - target.Type = policyengine.Container - case ape.TargetTypeGroup: - target.Type = policyengine.Group - case ape.TargetTypeNamespace: - target.Type = policyengine.Namespace - case ape.TargetTypeUser: - target.Type = policyengine.User - default: - return target, fmt.Errorf("unsupported target type: %v", ct.TargetType) - } - target.Name = ct.Name - return target, nil -} - -type morphReaderDecorator struct { - policyengine.MorphRuleChainStorageReader - - bearerTokenTarget policyengine.Target - - bearerTokenChains []*chain.Chain -} - -func newMorphReaderDecorator(r policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (*morphReaderDecorator, error) { - if r == nil { - return nil, errors.New("empty morph chain rule reader") - } - t, err := newTarget(override.Target) - if err != nil { - return nil, err - } - - bearerTokenChains := make([]*chain.Chain, len(override.Chains)) - for i := range override.Chains { - chain := new(chain.Chain) - if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil { - return nil, fmt.Errorf("invalid ape chain: %w", err) - } - bearerTokenChains[i] = chain - } - - return &morphReaderDecorator{ - MorphRuleChainStorageReader: r, - bearerTokenTarget: t, - bearerTokenChains: bearerTokenChains, - }, nil -} - -func (m *morphReaderDecorator) ListMorphRuleChains(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) { - if len(m.bearerTokenChains) > 0 && m.bearerTokenTarget.Type == target.Type { - if m.bearerTokenTarget.Name != target.Name { - return nil, fmt.Errorf("unexpected bearer token target: %s", m.bearerTokenTarget.Name) - } - return m.bearerTokenChains, nil - } - return m.MorphRuleChainStorageReader.ListMorphRuleChains(name, target) -} - -// BearerChainFeedRouter creates a chain router emplacing bearer token rule chains. -// Bearer token chains override only container target chains within Policy contract. This means the order of checking -// is as follows: -// -// 1. Local overrides; -// 2. Policy contract chains for a namespace target (as namespace chains have higher priority); -// 3. Bearer token chains for a container target - if they're not defined, then it checks Policy contract chains; -// 4. Checks for the remaining targets. 
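For illustration, an end-to-end sketch of this checking order, assuming prepared storages and an override; the helper is hypothetical, and the resource.Request parameter stands for policy-engine's pkg/resource interface, which this file does not itself import:

func exampleBearerVerdict(local policyengine.LocalOverrideStorage, morph policyengine.MorphRuleChainStorageReader, override bearer.APEOverride, ns, cnr string, req resource.Request) (chain.Status, bool, error) {
	// Bearer chains shadow the container-target chains of the Policy
	// contract, per the ordering described above.
	r, err := BearerChainFeedRouter(local, morph, override)
	if err != nil {
		return chain.NoRuleFound, false, err
	}
	return r.IsAllowed(chain.Ingress, policyengine.NewRequestTarget(ns, cnr), req)
}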
-func BearerChainFeedRouter(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (policyengine.ChainRouter, error) { - mr, err := newMorphReaderDecorator(morphChainStorage, override) - if err != nil { - return nil, fmt.Errorf("create morph reader with bearer override error: %w", err) - } - return policyengine.NewDefaultChainRouterWithLocalOverrides(mr, localOverrideStorage), nil -} diff --git a/pkg/ape/router/bearer_overrides_test.go b/pkg/ape/router/bearer_overrides_test.go deleted file mode 100644 index 3c12ee6fa..000000000 --- a/pkg/ape/router/bearer_overrides_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package router_test - -import ( - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router" - apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" - resourcetest "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource/testutil" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/stretchr/testify/require" -) - -const ( - container = "67ETTZzbzJC6WxdQhHHHsJNCttVMBqYrSoFaUFVDNfiX" - rootNs = "" -) - -var ( - allowBySourceIP = &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}}, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: "SourceIP", - Value: "10.122.1.20", - }, - }, - }, - }, - } - - denyBySourceIP = &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}}, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: "SourceIP", - Value: "10.122.1.20", - }, - }, - }, - }, - } -) - -func TestBearerChainFedRouter(t *testing.T) { - t.Run("no bearer token overrides", func(t *testing.T) { - inmem := inmemory.NewInMemoryLocalOverrides() - - inmem.LocalStorage().AddOverride(chain.Ingress, engine.ContainerTarget(container), denyBySourceIP) - inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP) - - _, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bearerSDK.APEOverride{}) - require.Error(t, err) - }) - t.Run("allow by container with deny by bearer overrides", func(t *testing.T) { - inmem := inmemory.NewInMemoryLocalOverrides() - - inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP) - - bt := bearerSDK.APEOverride{ - Target: apeSDK.ChainTarget{ - TargetType: apeSDK.TargetTypeContainer, - Name: container, - }, - Chains: []apeSDK.Chain{{ - Raw: denyBySourceIP.Bytes(), - }}, - } - - r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt) - require.NoError(t, err) - - req := resourcetest.NewRequest(nativeschema.MethodPutObject, - resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), 
map[string]string{}), - map[string]string{ - "SourceIP": "10.122.1.20", - "Actor": "someOwner", - }, - ) - - st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, st, chain.AccessDenied) - }) - t.Run("allow by namespace with deny by bearer overrides", func(t *testing.T) { - inmem := inmemory.NewInMemoryLocalOverrides() - - inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP) - inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), allowBySourceIP) - - bt := bearerSDK.APEOverride{ - Target: apeSDK.ChainTarget{ - TargetType: apeSDK.TargetTypeContainer, - Name: container, - }, - Chains: []apeSDK.Chain{{ - Raw: denyBySourceIP.Bytes(), - }}, - } - - r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt) - require.NoError(t, err) - - req := resourcetest.NewRequest(nativeschema.MethodPutObject, - resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}), - map[string]string{ - "SourceIP": "10.122.1.20", - "Actor": "someOwner", - }, - ) - - st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, st, chain.AccessDenied) - }) - t.Run("deny by namespace with allow by bearer overrides", func(t *testing.T) { - inmem := inmemory.NewInMemoryLocalOverrides() - - inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), denyBySourceIP) - - bt := bearerSDK.APEOverride{ - Target: apeSDK.ChainTarget{ - TargetType: apeSDK.TargetTypeContainer, - Name: container, - }, - Chains: []apeSDK.Chain{{ - Raw: allowBySourceIP.Bytes(), - }}, - } - - r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt) - require.NoError(t, err) - - req := resourcetest.NewRequest(nativeschema.MethodPutObject, - resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}), - map[string]string{ - "SourceIP": "10.122.1.20", - "Actor": "someOwner", - }, - ) - - st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, st, chain.AccessDenied) - }) -} diff --git a/pkg/ape/router/single_pass.go b/pkg/ape/router/single_pass.go deleted file mode 100644 index ec9244bae..000000000 --- a/pkg/ape/router/single_pass.go +++ /dev/null @@ -1,30 +0,0 @@ -package router - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" -) - -// SingleUseRouterWithBearerTokenChains creates chain router with inmemory storage implementation and -// fed with APE chains defined in Bearer token. 
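For illustration, the intended one-shot call pattern, assuming the overrides come from an already verified bearer token; the wrapper is hypothetical:

func exampleSingleUse(overrides []bearer.APEOverride) (engine.ChainRouter, error) {
	// The router is backed by a fresh in-memory morph storage seeded only
	// with the token's chains, so it is safe to discard after one request.
	return SingleUseRouterWithBearerTokenChains(overrides)
}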
-func SingleUseRouterWithBearerTokenChains(overrides []bearer.APEOverride) (engine.ChainRouter, error) { - storage := inmemory.NewInmemoryMorphRuleChainStorage() - for _, override := range overrides { - target, err := newTarget(override.Target) - if err != nil { - return nil, err - } - for i := range override.Chains { - chain := new(apechain.Chain) - if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil { - return nil, fmt.Errorf("invalid ape chain: %w", err) - } - _, _, _ = storage.AddMorphRuleChain(apechain.Ingress, target, chain) - } - } - return engine.NewDefaultChainRouter(storage), nil -} diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go deleted file mode 100644 index 98bdf99e7..000000000 --- a/pkg/core/client/client.go +++ /dev/null @@ -1,79 +0,0 @@ -package client - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" -) - -// Client is an interface of FrostFS storage -// node's client. -type Client interface { - ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error) - ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error) - ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error) - ObjectGetInit(context.Context, client.PrmObjectGet) (*client.ObjectReader, error) - ObjectHead(context.Context, client.PrmObjectHead) (*client.ResObjectHead, error) - ObjectSearchInit(context.Context, client.PrmObjectSearch) (*client.ObjectListReader, error) - ObjectRangeInit(context.Context, client.PrmObjectRange) (*client.ObjectRangeReader, error) - ObjectHash(context.Context, client.PrmObjectHash) (*client.ResObjectHash, error) - ExecRaw(f func(client *rawclient.Client) error) error - Close() error -} - -// MultiAddressClient is an interface of the -// Client that supports multihost work. -type MultiAddressClient interface { - Client - - // RawForAddress must return rawclient.Client - // for the passed network.Address. - RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error - - ReportError(error) -} - -// NodeInfo groups information about a FrostFS storage node needed for Client construction. -type NodeInfo struct { - addrGroup network.AddressGroup - - externalAddrGroup network.AddressGroup - - key []byte -} - -// SetAddressGroup sets a group of network addresses. -func (x *NodeInfo) SetAddressGroup(v network.AddressGroup) { - x.addrGroup = v -} - -// AddressGroup returns a group of network addresses. -func (x NodeInfo) AddressGroup() network.AddressGroup { - return x.addrGroup -} - -// SetExternalAddressGroup sets an external group of network addresses. -func (x *NodeInfo) SetExternalAddressGroup(v network.AddressGroup) { - x.externalAddrGroup = v -} - -// ExternalAddressGroup returns a group of network addresses. -func (x NodeInfo) ExternalAddressGroup() network.AddressGroup { - return x.externalAddrGroup -} - -// SetPublicKey sets a public key in a binary format. -// -// Argument must not be mutated. -func (x *NodeInfo) SetPublicKey(v []byte) { - x.key = v -} - -// PublicKey returns a public key in a binary format. -// -// Result must not be mutated. 
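For illustration, a population sketch for NodeInfo; the argument values are placeholders (production code derives them from netmap descriptors, see util.go below) and the helper is hypothetical:

func exampleNodeInfo(primary, external network.AddressGroup, rawKey []byte) NodeInfo {
	var ni NodeInfo
	ni.SetPublicKey(rawKey)              // binary key, not mutated afterwards
	ni.SetAddressGroup(primary)          // endpoints announced in the netmap
	ni.SetExternalAddressGroup(external) // optional external endpoints
	return ni
}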
-func (x *NodeInfo) PublicKey() []byte { - return x.key -} diff --git a/pkg/core/client/errors.go b/pkg/core/client/errors.go deleted file mode 100644 index a2aac5e55..000000000 --- a/pkg/core/client/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package client - -import ( - "errors" -) - -// ErrWrongPublicKey is returned when the client's response is signed with a key different -// from the one declared in the network map. -var ErrWrongPublicKey = errors.New("public key is different from the key in the network map") diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go deleted file mode 100644 index 91ee5c6c3..000000000 --- a/pkg/core/client/util.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "iter" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" -) - -func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGroup) { - dst.SetPublicKey(k) - dst.SetAddressGroup(a) - dst.SetExternalAddressGroup(external) -} - -// NodeInfoFromRawNetmapElement fills NodeInfo structure from the interface of raw netmap member's descriptor. -// -// Args must not be nil. -func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { - PublicKey() []byte - Addresses() iter.Seq[string] - NumberOfAddresses() int - ExternalAddresses() []string -}, -) error { - var a network.AddressGroup - - err := a.FromIterator(info) - if err != nil { - return fmt.Errorf("parse network address: %w", err) - } - - var external network.AddressGroup - - ext := info.ExternalAddresses() - if len(ext) > 0 { - _ = external.FromStringSlice(ext) - } - - nodeInfoFromKeyAddr(dst, info.PublicKey(), a, external) - - return nil -} - -// NodeInfoFromNetmapElement fills NodeInfo structure from the interface of the parsed netmap member's descriptor. -// -// Args must not be nil. -func NodeInfoFromNetmapElement(dst *NodeInfo, info interface { - PublicKey() []byte - Addresses() network.AddressGroup - ExternalAddresses() network.AddressGroup -}, -) { - nodeInfoFromKeyAddr(dst, info.PublicKey(), info.Addresses(), info.ExternalAddresses()) -} - -// AssertKeyResponseCallback returns client response callback which checks if the response was signed by the expected key. -// Returns ErrWrongPublicKey in case of key mismatch. -func AssertKeyResponseCallback(expectedKey []byte) func(client.ResponseMetaInfo) error { - return func(info client.ResponseMetaInfo) error { - if !bytes.Equal(info.ResponderKey(), expectedKey) { - return ErrWrongPublicKey - } - - return nil - } -} diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go deleted file mode 100644 index 8c14bdf5e..000000000 --- a/pkg/core/container/delete.go +++ /dev/null @@ -1,22 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -// RemovalWitness groups the information required -// to prove and verify the removal of a container. -type RemovalWitness struct { - // ContainerID is the identifier of the container - // to be removed. - ContainerID cid.ID - - // Signature is the signature of the container identifier. - Signature *refs.Signature - - // SessionToken is the token of the session within - // which the container was removed. 
- SessionToken *session.Container -} diff --git a/pkg/core/container/ec.go b/pkg/core/container/ec.go deleted file mode 100644 index 1acb87f2b..000000000 --- a/pkg/core/container/ec.go +++ /dev/null @@ -1,11 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" -) - -// IsECContainer returns True if container has erasure coding policy. -func IsECContainer(cnr containerSDK.Container) bool { - return policy.IsECPlacement(cnr.PlacementPolicy()) -} diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go deleted file mode 100644 index 1c52d93e7..000000000 --- a/pkg/core/container/info.go +++ /dev/null @@ -1,104 +0,0 @@ -package container - -import ( - "context" - "sync" - - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -type Info struct { - Indexed bool - Removed bool -} - -type infoValue struct { - info Info - err error -} - -type InfoProvider interface { - Info(ctx context.Context, id cid.ID) (Info, error) -} - -type infoProvider struct { - mtx *sync.RWMutex - cache map[cid.ID]infoValue - kl *utilSync.KeyLocker[cid.ID] - - source Source - sourceErr error - sourceOnce *sync.Once - sourceFactory func() (Source, error) -} - -func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { - return &infoProvider{ - mtx: &sync.RWMutex{}, - cache: make(map[cid.ID]infoValue), - sourceOnce: &sync.Once{}, - kl: utilSync.NewKeyLocker[cid.ID](), - sourceFactory: sourceFactory, - } -} - -func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { - v, found := r.tryGetFromCache(id) - if found { - return v.info, v.err - } - - return r.getFromSource(ctx, id) -} - -func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - value, found := r.cache[id] - return value, found -} - -func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { - r.kl.Lock(id) - defer r.kl.Unlock(id) - - if v, ok := r.tryGetFromCache(id); ok { - return v.info, v.err - } - - r.sourceOnce.Do(func() { - r.source, r.sourceErr = r.sourceFactory() - }) - if r.sourceErr != nil { - return Info{}, r.sourceErr - } - - cnr, err := r.source.Get(ctx, id) - var civ infoValue - if err != nil { - if client.IsErrContainerNotFound(err) { - removed, err := WasRemoved(ctx, r.source, id) - if err != nil { - civ.err = err - } else { - civ.info.Removed = removed - } - } else { - civ.err = err - } - } else { - civ.info.Indexed = IsIndexedContainer(cnr.Value) - } - r.putToCache(id, civ) - return civ.info, civ.err -} - -func (r *infoProvider) putToCache(id cid.ID, ct infoValue) { - r.mtx.Lock() - defer r.mtx.Unlock() - - r.cache[id] = ct -} diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go deleted file mode 100644 index 4eb14e53c..000000000 --- a/pkg/core/container/storage.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// Container groups information about the 
FrostFS container stored in the FrostFS network. -type Container struct { - // Container structure. - Value container.Container - - // Signature of the Value. - Signature frostfscrypto.Signature - - // Session within which Value was created. Nil means session absence. - Session *session.Container -} - -// DelInfo contains info about removed container. -type DelInfo struct { - // Container owner. - Owner user.ID - - // Epoch indicates when the container was removed. - Epoch uint64 -} - -// Source is an interface that wraps -// basic container receiving method. -type Source interface { - // Get reads the container from the storage by its identifier. - // It returns the pointer to the requested container and any error encountered. - // - // Get must return exactly one non-nil value. - // Get must return an error of type apistatus.ContainerNotFound if the container is not in the storage. - // - // Implementations must not retain the container pointer and modify - // the container through it. - Get(ctx context.Context, cid cid.ID) (*Container, error) - - DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) -} - -// EACL groups information about the FrostFS container's extended ACL stored in -// the FrostFS network. -type EACL struct { - // Extended ACL structure. - Value *eacl.Table - - // Signature of the Value. - Signature frostfscrypto.Signature - - // Session within which Value was set. Nil means session absence. - Session *session.Container -} diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go deleted file mode 100644 index 61c568052..000000000 --- a/pkg/core/container/util.go +++ /dev/null @@ -1,35 +0,0 @@ -package container - -import ( - "context" - "errors" - - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// WasRemoved checks whether the container ever existed or -// it just has not been created yet at the current epoch. -func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { - _, err := s.DeletionInfo(ctx, cid) - if err == nil { - return true, nil - } - var errContainerNotFound *apistatus.ContainerNotFound - if errors.As(err, &errContainerNotFound) { - return false, nil - } - return false, err -} - -// IsIndexedContainer returns True if container attributes should be indexed. -func IsIndexedContainer(cnr containerSDK.Container) bool { - var isS3Container bool - for key := range cnr.Attributes() { - if key == ".s3-location-constraint" { - isS3Container = true - } - } - return !isS3Container -} diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go deleted file mode 100644 index e752043d3..000000000 --- a/pkg/core/frostfsid/subject_provider.go +++ /dev/null @@ -1,18 +0,0 @@ -package frostfsid - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -const ( - SubjectNotFoundErrorMessage = "subject not found" -) - -// SubjectProvider interface provides methods to get subject from FrostfsID contract. 
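Callers elsewhere in this tree detect a missing subject by matching the message substring; for illustration, a hypothetical helper capturing that convention (the strings package would need to be imported):

func isSubjectNotFound(err error) bool {
	// Mirrors the checks in pkg/ape/request: the contract client reports a
	// missing subject through its error text rather than a typed error.
	return err != nil && strings.Contains(err.Error(), SubjectNotFoundErrorMessage)
}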
-type SubjectProvider interface { - GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) - GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) -} diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go deleted file mode 100644 index 0c64bb798..000000000 --- a/pkg/core/netmap/keys.go +++ /dev/null @@ -1,7 +0,0 @@ -package netmap - -// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes. -type AnnouncedKeys interface { - // IsLocalKey checks if the key was announced by a local node. - IsLocalKey(key []byte) bool -} diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go deleted file mode 100644 index e58e42634..000000000 --- a/pkg/core/netmap/nodes.go +++ /dev/null @@ -1,50 +0,0 @@ -package netmap - -import ( - "iter" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// Node is a named type of netmap.NodeInfo which provides interface needed -// in the current repository. Node is expected to be used everywhere instead -// of direct usage of netmap.NodeInfo, so it represents a type mediator. -type Node netmap.NodeInfo - -// PublicKey returns public key bound to the storage node. -// -// Return value MUST NOT be mutated, make a copy first. -func (x Node) PublicKey() []byte { - return (netmap.NodeInfo)(x).PublicKey() -} - -// Addresses returns an iterator over all announced network addresses. -func (x Node) Addresses() iter.Seq[string] { - return (netmap.NodeInfo)(x).NetworkEndpoints() -} - -// IterateAddresses iterates over all announced network addresses -// and passes them into f. Handler MUST NOT be nil. -// Deprecated: use [Node.Addresses] instead. -func (x Node) IterateAddresses(f func(string) bool) { - for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { - if f(s) { - return - } - } -} - -// NumberOfAddresses returns number of announced network addresses. -func (x Node) NumberOfAddresses() int { - return (netmap.NodeInfo)(x).NumberOfNetworkEndpoints() -} - -// ExternalAddresses returns external addresses of a node. -func (x Node) ExternalAddresses() []string { - return (netmap.NodeInfo)(x).ExternalAddresses() -} - -// Nodes is a named type of []netmap.NodeInfo which provides interface needed -// in the current repository. Nodes is expected to be used everywhere instead -// of direct usage of []netmap.NodeInfo, so it represents a type mediator. -type Nodes []netmap.NodeInfo diff --git a/pkg/core/netmap/state.go b/pkg/core/netmap/state.go deleted file mode 100644 index 1b057d6d7..000000000 --- a/pkg/core/netmap/state.go +++ /dev/null @@ -1,7 +0,0 @@ -package netmap - -// State groups the current system state parameters. -type State interface { - // CurrentEpoch returns the number of the current FrostFS epoch. - CurrentEpoch() uint64 -} diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go deleted file mode 100644 index 97313da84..000000000 --- a/pkg/core/netmap/storage.go +++ /dev/null @@ -1,47 +0,0 @@ -package netmap - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// Source is an interface that wraps -// basic network map receiving method. -type Source interface { - // GetNetMap reads the diff-th past network map from the storage. - // Calling with zero diff returns the latest network map. - // It returns the pointer to the requested network map and any error encountered. - // - // GetNetMap must return exactly one non-nil value. 
- // GetNetMap must return ErrNotFound if the network map is not in the storage. - // - // Implementations must not retain the network map pointer and modify - // the network map through it. - GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) - - // GetNetMapByEpoch reads network map by the epoch number from the storage. - // It returns the pointer to the requested network map and any error encountered. - // - // Must return exactly one non-nil value. - // - // Implementations must not retain the network map pointer and modify - // the network map through it. - GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) - - // Epoch reads the current epoch from the storage. - // It returns the number of the current epoch and any error encountered. - // - // Must return exactly one non-default value. - Epoch(ctx context.Context) (uint64, error) -} - -// GetLatestNetworkMap requests and returns the latest network map from the storage. -func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { - return src.GetNetMap(ctx, 0) -} - -// GetPreviousNetworkMap requests and returns the network map immediately preceding the latest one from the storage. -func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { - return src.GetNetMap(ctx, 1) -} diff --git a/pkg/core/object/ec.go b/pkg/core/object/ec.go deleted file mode 100644 index 549ff7cd3..000000000 --- a/pkg/core/object/ec.go +++ /dev/null @@ -1,13 +0,0 @@ -package object - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// IsECSupported returns True if EC is supported for the object. -// -// EC is supported only for regular, non-linking objects. -func IsECSupported(obj *objectSDK.Object) bool { - return obj.Type() == objectSDK.TypeRegular && - len(obj.Children()) == 0 -} diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go deleted file mode 100644 index cf090eb37..000000000 --- a/pkg/core/object/fmt.go +++ /dev/null @@ -1,458 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "errors" - "fmt" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// FormatValidator represents an object format validator. -type FormatValidator struct { - *cfg - - senderClassifier SenderClassifier -} - -// FormatValidatorOption represents a FormatValidator constructor option. -type FormatValidatorOption func(*cfg) - -type cfg struct { - netState netmap.State - e LockSource - ir InnerRing - netmap netmap.Source - containers container.Source - log *logger.Logger - verifyTokenIssuer bool -} - -// LockSource is a source of lock relations between the objects. -type LockSource interface { - // IsLocked must clarify the object's lock status. 
- IsLocked(ctx context.Context, address oid.Address) (bool, error) -} - -var errNilObject = errors.New("object is nil") - -var errNilID = errors.New("missing identifier") - -var errNilCID = errors.New("missing container identifier") - -var errNoExpirationEpoch = errors.New("missing expiration epoch attribute") - -var errTombstoneExpiration = errors.New("tombstone body and header contain different expiration values") - -var errMissingSignature = errors.New("missing signature") - -func defaultCfg() *cfg { - return new(cfg) -} - -// NewFormatValidator creates, initializes and returns FormatValidator instance. -func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator { - cfg := defaultCfg() - - for i := range opts { - opts[i](cfg) - } - - return &FormatValidator{ - cfg: cfg, - senderClassifier: NewSenderClassifier(cfg.ir, cfg.netmap, cfg.log), - } -} - -// Validate validates object format. -// -// Does not validate payload checksum and content. -// If unprepared is true, only fields set by user are validated. -// -// Returns nil error if the object has valid structure. -func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, unprepared bool) error { - if obj == nil { - return errNilObject - } - - _, idSet := obj.ID() - if !unprepared && !idSet { - return errNilID - } - - _, cnrSet := obj.ContainerID() - if !cnrSet { - return errNilCID - } - - if err := v.checkOwner(obj); err != nil { - return err - } - - if err := v.checkAttributes(obj); err != nil { - return fmt.Errorf("invalid attributes: %w", err) - } - - exp, err := expirationEpochAttribute(obj) - if err != nil { - if !errors.Is(err, errNoExpirationEpoch) { - return fmt.Errorf("object did not pass expiration check: %w", err) - } - } else if !unprepared && exp < v.netState.CurrentEpoch() { - if err := v.checkIfExpired(ctx, obj); err != nil { - return fmt.Errorf("object did not pass expiration check: %w", err) - } - } - - if !unprepared { - if err := v.validateSignatureKey(ctx, obj); err != nil { - return fmt.Errorf("(%T) could not validate signature key: %w", v, err) - } - - if err := objectSDK.CheckHeaderVerificationFields(obj); err != nil { - return fmt.Errorf("(%T) could not validate header fields: %w", v, err) - } - } - - if obj = obj.Parent(); obj != nil { - // Parent object already exists. 
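		// validate the parent header as a prepared object: the parent of a
		// stored child must already be complete (ID and signature set)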
-		return v.Validate(ctx, obj, false)
-	}
-
-	return nil
-}
-
-func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
-	sig := obj.Signature()
-	if sig == nil {
-		return errMissingSignature
-	}
-
-	var sigV2 refs.Signature
-	sig.WriteToV2(&sigV2)
-
-	binKey := sigV2.GetKey()
-
-	var key frostfsecdsa.PublicKey
-
-	err := key.Decode(binKey)
-	if err != nil {
-		return fmt.Errorf("decode public key: %w", err)
-	}
-
-	token := obj.SessionToken()
-	ownerID := obj.OwnerID()
-
-	if token == nil && obj.ECHeader() != nil {
-		role, err := v.isIROrContainerNode(ctx, obj, binKey)
-		if err != nil {
-			return err
-		}
-		if role == acl.RoleContainer {
-			// an EC part may be restored or created by a container node, so ownerID may not match the object signature
-			return nil
-		}
-		return v.checkOwnerKey(ownerID, key)
-	}
-
-	if token == nil || !token.AssertAuthKey(&key) {
-		return v.checkOwnerKey(ownerID, key)
-	}
-
-	if v.verifyTokenIssuer {
-		role, err := v.isIROrContainerNode(ctx, obj, binKey)
-		if err != nil {
-			return err
-		}
-
-		if role == acl.RoleContainer || role == acl.RoleInnerRing {
-			return nil
-		}
-
-		if !token.Issuer().Equals(ownerID) {
-			return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", v, token.Issuer(), ownerID)
-		}
-		return nil
-	}
-
-	return nil
-}
-
-func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
-	cnrID, containerIDSet := obj.ContainerID()
-	if !containerIDSet {
-		return acl.RoleOthers, errNilCID
-	}
-
-	cnrIDBin := make([]byte, sha256.Size)
-	cnrID.Encode(cnrIDBin)
-
-	cnr, err := v.containers.Get(ctx, cnrID)
-	if err != nil {
-		return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
-	}
-
-	res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
-	if err != nil {
-		return acl.RoleOthers, err
-	}
-	return res.Role, nil
-}
-
-func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) error {
-	var id2 user.ID
-	user.IDFromKey(&id2, (ecdsa.PublicKey)(key))
-
-	if !id.Equals(id2) {
-		return fmt.Errorf("(%T) different owner identifiers %s/%s", v, id, id2)
-	}
-
-	return nil
-}
-
-// ContentMeta describes FrostFS meta information carried by the object's payload if the object
-// is one of:
-//   - object.TypeTombstone;
-//   - object.TypeLock.
-type ContentMeta struct {
-	typ objectSDK.Type
-
-	objs []oid.ID
-}
-
-// Type returns the object's type.
-func (i ContentMeta) Type() objectSDK.Type {
-	return i.typ
-}
-
-// Objects returns objects that the original object's payload affects:
-//   - inhumed objects, if the original object is a Tombstone;
-//   - locked objects, if the original object is a Lock;
-//   - nil, if the original object is a Regular object.
-func (i ContentMeta) Objects() []oid.ID {
-	return i.objs
-}
-
-// ValidateContent validates payload content according to the object type.
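// Tombstone and lock payloads are decoded and cross-checked against the
// header; other object types pass through with no affected objects.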
-func (v *FormatValidator) ValidateContent(o *objectSDK.Object) (ContentMeta, error) {
-	meta := ContentMeta{
-		typ: o.Type(),
-	}
-
-	switch o.Type() {
-	case objectSDK.TypeTombstone:
-		if err := v.fillAndValidateTombstoneMeta(o, &meta); err != nil {
-			return ContentMeta{}, err
-		}
-	case objectSDK.TypeLock:
-		if err := v.fillAndValidateLockMeta(o, &meta); err != nil {
-			return ContentMeta{}, err
-		}
-	default:
-		// ignore all other object types, they do not need payload formatting
-	}
-
-	return meta, nil
-}
-
-func (v *FormatValidator) fillAndValidateLockMeta(o *objectSDK.Object, meta *ContentMeta) error {
-	if len(o.Payload()) == 0 {
-		return errors.New("empty payload in lock")
-	}
-
-	if _, ok := o.ContainerID(); !ok {
-		return errors.New("missing container")
-	}
-
-	if _, ok := o.ID(); !ok {
-		return errors.New("missing ID")
-	}
-	// check that the LOCK object has a correct expiration epoch
-	lockExp, err := expirationEpochAttribute(o)
-	if err != nil {
-		return fmt.Errorf("lock object expiration epoch: %w", err)
-	}
-
-	if currEpoch := v.netState.CurrentEpoch(); lockExp < currEpoch {
-		return fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
-	}
-
-	var lock objectSDK.Lock
-
-	if err = lock.Unmarshal(o.Payload()); err != nil {
-		return fmt.Errorf("decode lock payload: %w", err)
-	}
-
-	num := lock.NumberOfMembers()
-	if num == 0 {
-		return errors.New("missing locked members")
-	}
-
-	meta.objs = make([]oid.ID, num)
-	lock.ReadMembers(meta.objs)
-	return nil
-}
-
-func (v *FormatValidator) fillAndValidateTombstoneMeta(o *objectSDK.Object, meta *ContentMeta) error {
-	if len(o.Payload()) == 0 {
-		return fmt.Errorf("(%T) empty payload in tombstone", v)
-	}
-
-	tombstone := objectSDK.NewTombstone()
-
-	if err := tombstone.Unmarshal(o.Payload()); err != nil {
-		return fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
-	}
-	// check if the tombstone has the same expiration in the body and the header
-	exp, err := expirationEpochAttribute(o)
-	if err != nil {
-		return err
-	}
-
-	if exp != tombstone.ExpirationEpoch() {
-		return errTombstoneExpiration
-	}
-
-	// mark all objects from the tombstone body as removed in the storage engine
-	if _, ok := o.ContainerID(); !ok {
-		return errors.New("missing container ID")
-	}
-
-	meta.objs = tombstone.Members()
-	return nil
-}
-
-var errExpired = errors.New("object has expired")
-
-func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Object) error {
-	// an object can be expired but locked;
-	// putting such an object is a correct operation
-
-	cID, _ := obj.ContainerID()
-	oID, _ := obj.ID()
-
-	var addr oid.Address
-	addr.SetContainer(cID)
-	addr.SetObject(oID)
-
-	locked, err := v.e.IsLocked(ctx, addr)
-	if err != nil {
-		return fmt.Errorf("locking status check for an expired object: %w", err)
-	}
-
-	if !locked {
-		return errExpired
-	}
-
-	return nil
-}
-
-func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
-	for _, a := range obj.Attributes() {
-		if a.Key() != objectV2.SysAttributeExpEpoch {
-			continue
-		}
-
-		return strconv.ParseUint(a.Value(), 10, 64)
-	}
-
-	return 0, errNoExpirationEpoch
-}
-
-var (
-	errDuplAttr     = errors.New("duplication of attributes detected")
-	errEmptyAttrVal = errors.New("empty attribute value")
-)
-
-func (v *FormatValidator) checkAttributes(obj *objectSDK.Object) error {
-	as := obj.Attributes()
-
-	mUnique := make(map[string]struct{}, len(as))
-
-	for _, a := range as {
-		key := a.Key()
-
-		if _, was := mUnique[key]; was {
-			return errDuplAttr
-		}
-
-		if a.Value() == "" {
-			return errEmptyAttrVal
-		}
-
-		mUnique[key] = struct{}{}
-	}
-
-	return nil
-}
-
-var errIncorrectOwner = errors.New("incorrect object owner")
-
-func (v *FormatValidator) checkOwner(obj *objectSDK.Object) error {
-	if idOwner := obj.OwnerID(); idOwner.IsEmpty() {
-		return errIncorrectOwner
-	}
-
-	return nil
-}
-
-// WithNetState returns an option to set the network state interface.
-func WithNetState(netState netmap.State) FormatValidatorOption {
-	return func(c *cfg) {
-		c.netState = netState
-	}
-}
-
-// WithLockSource returns an option to set the storage engine used as the lock source.
-func WithLockSource(e LockSource) FormatValidatorOption {
-	return func(c *cfg) {
-		c.e = e
-	}
-}
-
-// WithInnerRing returns an option to set the Inner Ring source.
-func WithInnerRing(ir InnerRing) FormatValidatorOption {
-	return func(c *cfg) {
-		c.ir = ir
-	}
-}
-
-// WithNetmapSource returns an option to set the Netmap source.
-func WithNetmapSource(ns netmap.Source) FormatValidatorOption {
-	return func(c *cfg) {
-		c.netmap = ns
-	}
-}
-
-// WithContainersSource returns an option to set the Containers source.
-func WithContainersSource(cs container.Source) FormatValidatorOption {
-	return func(c *cfg) {
-		c.containers = cs
-	}
-}
-
-// WithVerifySessionTokenIssuer returns an option to enable or disable session token issuer verification.
-func WithVerifySessionTokenIssuer(verifySessionTokenIssuer bool) FormatValidatorOption {
-	return func(c *cfg) {
-		c.verifyTokenIssuer = verifySessionTokenIssuer
-	}
-}
-
-// WithLogger returns an option to set the logger.
-func WithLogger(l *logger.Logger) FormatValidatorOption {
-	return func(c *cfg) {
-		c.log = l
-	}
-}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
deleted file mode 100644
index dc336eb34..000000000
--- a/pkg/core/object/fmt_test.go
+++ /dev/null
@@ -1,599 +0,0 @@
-package object
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"fmt"
-	"strconv"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
-	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
-	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"github.com/google/uuid"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/zap/zaptest"
-)
-
-func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
-	var idOwner user.ID
-	user.IDFromKey(&idOwner, key.PublicKey)
-
-	obj := objectSDK.New()
-	obj.SetContainerID(cidtest.ID())
-	obj.SetOwnerID(idOwner)
-
-	return obj
-}
-
-type testNetState struct {
-	epoch uint64
-}
-
-func (s testNetState) CurrentEpoch() uint64 {
-	return s.epoch
-}
-
-type testLockSource struct {
-	m map[oid.Address]bool
-}
-
-func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) {
-	return t.m[address], nil
-}
-
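// The mocks above also show the minimal wiring the validator needs outside
// these tests. A sketch, assuming the same packages this file imports (the
// epoch value and the empty lock map are arbitrary placeholders):
//
//	key, _ := keys.NewPrivateKey()
//	obj := blankValidObject(&key.PrivateKey)
//
//	v := NewFormatValidator(
//		WithNetState(testNetState{epoch: 10}),
//		WithLockSource(testLockSource{m: map[oid.Address]bool{}}),
//	)
//
//	// an unprepared object (no ID or signature yet) passes structural checks
//	if err := v.Validate(context.Background(), obj, true); err != nil {
//		panic(err)
//	}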
-func TestFormatValidator_Validate(t *testing.T) { - const curEpoch = 13 - - ls := testLockSource{ - m: make(map[oid.Address]bool), - } - - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - WithLockSource(ls), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - ownerKey, err := keys.NewPrivateKey() - require.NoError(t, err) - - t.Run("nil input", func(t *testing.T) { - require.Error(t, v.Validate(context.Background(), nil, true)) - }) - - t.Run("nil identifier", func(t *testing.T) { - obj := objectSDK.New() - - require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID) - }) - - t.Run("nil container identifier", func(t *testing.T) { - obj := objectSDK.New() - obj.SetID(oidtest.ID()) - - require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID) - }) - - t.Run("unsigned object", func(t *testing.T) { - obj := objectSDK.New() - obj.SetContainerID(cidtest.ID()) - obj.SetID(oidtest.ID()) - - require.Error(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("correct w/ session token", func(t *testing.T) { - var idOwner user.ID - user.IDFromKey(&idOwner, ownerKey.PrivateKey.PublicKey) - - tok := sessiontest.Object() - err := tok.Sign(ownerKey.PrivateKey) - require.NoError(t, err) - - obj := objectSDK.New() - obj.SetContainerID(cidtest.ID()) - obj.SetSessionToken(tok) - obj.SetOwnerID(idOwner) - - require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj)) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("correct w/o session token", func(t *testing.T) { - obj := blankValidObject(&ownerKey.PrivateKey) - - require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj)) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("tombstone content", func(t *testing.T) { - obj := objectSDK.New() - obj.SetType(objectSDK.TypeTombstone) - obj.SetContainerID(cidtest.ID()) - - _, err := v.ValidateContent(obj) - require.Error(t, err) // no tombstone content - - content := objectSDK.NewTombstone() - content.SetMembers([]oid.ID{oidtest.ID()}) - - data, err := content.Marshal() - require.NoError(t, err) - - obj.SetPayload(data) - - _, err = v.ValidateContent(obj) - require.Error(t, err) // no members in tombstone - - content.SetMembers([]oid.ID{oidtest.ID()}) - - data, err = content.Marshal() - require.NoError(t, err) - - obj.SetPayload(data) - - _, err = v.ValidateContent(obj) - require.Error(t, err) // no expiration epoch in tombstone - - var expirationAttribute objectSDK.Attribute - expirationAttribute.SetKey(objectV2.SysAttributeExpEpoch) - expirationAttribute.SetValue(strconv.Itoa(10)) - - obj.SetAttributes(expirationAttribute) - - _, err = v.ValidateContent(obj) - require.Error(t, err) // different expiration values - - id := oidtest.ID() - - content.SetExpirationEpoch(10) - content.SetMembers([]oid.ID{id}) - data, err = content.Marshal() - require.NoError(t, err) - - obj.SetPayload(data) - - contentGot, err := v.ValidateContent(obj) - require.NoError(t, err) // all good - - require.EqualValues(t, []oid.ID{id}, contentGot.Objects()) - require.Equal(t, objectSDK.TypeTombstone, contentGot.Type()) - }) - - t.Run("expiration", func(t *testing.T) { - fn := func(val string) *objectSDK.Object { - obj := blankValidObject(&ownerKey.PrivateKey) - - var a objectSDK.Attribute - a.SetKey(objectV2.SysAttributeExpEpoch) - a.SetValue(val) - - obj.SetAttributes(a) - - require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj)) - - 
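			// the object is now signed and carries val as its expiration-epoch attribute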
return obj - } - - t.Run("invalid attribute value", func(t *testing.T) { - val := "text" - err := v.Validate(context.Background(), fn(val), false) - require.Error(t, err) - err = v.Validate(context.Background(), fn(val), true) - require.Error(t, err) - }) - - t.Run("expired object", func(t *testing.T) { - val := strconv.FormatUint(curEpoch-1, 10) - obj := fn(val) - - t.Run("non-locked", func(t *testing.T) { - err := v.Validate(context.Background(), obj, false) - require.ErrorIs(t, err, errExpired) - }) - - t.Run("locked", func(t *testing.T) { - var addr oid.Address - oID, _ := obj.ID() - cID, _ := obj.ContainerID() - - addr.SetContainer(cID) - addr.SetObject(oID) - ls.m[addr] = true - - err := v.Validate(context.Background(), obj, false) - require.NoError(t, err) - }) - }) - - t.Run("alive object", func(t *testing.T) { - val := strconv.FormatUint(curEpoch, 10) - err := v.Validate(context.Background(), fn(val), true) - require.NoError(t, err) - }) - }) - - t.Run("attributes", func(t *testing.T) { - t.Run("duplication", func(t *testing.T) { - obj := blankValidObject(&ownerKey.PrivateKey) - - var a1 objectSDK.Attribute - a1.SetKey("key1") - a1.SetValue("val1") - - var a2 objectSDK.Attribute - a2.SetKey("key2") - a2.SetValue("val2") - - obj.SetAttributes(a1, a2) - - err := v.checkAttributes(obj) - require.NoError(t, err) - - a2.SetKey(a1.Key()) - obj.SetAttributes(a1, a2) - - err = v.checkAttributes(obj) - require.Equal(t, errDuplAttr, err) - }) - - t.Run("empty value", func(t *testing.T) { - obj := blankValidObject(&ownerKey.PrivateKey) - - var a objectSDK.Attribute - a.SetKey("key") - - obj.SetAttributes(a) - - err := v.checkAttributes(obj) - require.Equal(t, errEmptyAttrVal, err) - }) - }) -} - -func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { - const curEpoch = 13 - - ls := testLockSource{ - m: make(map[oid.Address]bool), - } - - signer, err := keys.NewPrivateKey() - require.NoError(t, err) - - var owner user.ID - ownerPrivKey, err := keys.NewPrivateKey() - require.NoError(t, err) - user.IDFromKey(&owner, ownerPrivKey.PrivateKey.PublicKey) - - t.Run("different issuer and owner, verify issuer disabled", func(t *testing.T) { - t.Parallel() - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - WithLockSource(ls), - WithVerifySessionTokenIssuer(false), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - tok := sessiontest.Object() - fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey()) - tok.SetID(uuid.New()) - tok.SetAuthKey(&fsPubKey) - tok.SetExp(100500) - tok.SetIat(1) - tok.SetNbf(1) - require.NoError(t, tok.Sign(signer.PrivateKey)) - - obj := objectSDK.New() - obj.SetContainerID(cidtest.ID()) - obj.SetSessionToken(tok) - obj.SetOwnerID(owner) - require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj)) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("different issuer and owner, issuer is IR node, verify issuer enabled", func(t *testing.T) { - t.Parallel() - - cnrID := cidtest.ID() - cont := containerSDK.Container{} - cont.Init() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - cont.SetPlacementPolicy(pp) - - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - WithLockSource(ls), - WithVerifySessionTokenIssuer(true), - WithInnerRing(&testIRSource{ - irNodes: [][]byte{signer.PublicKey().Bytes()}, - }), - WithContainersSource( - &testContainerSource{ - containers: map[cid.ID]*container.Container{ - cnrID: { - Value: cont, 
- }, - }, - }, - ), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - tok := sessiontest.Object() - fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey()) - tok.SetID(uuid.New()) - tok.SetAuthKey(&fsPubKey) - tok.SetExp(100500) - tok.SetIat(1) - tok.SetNbf(1) - require.NoError(t, tok.Sign(signer.PrivateKey)) - - obj := objectSDK.New() - obj.SetContainerID(cnrID) - obj.SetSessionToken(tok) - obj.SetOwnerID(owner) - require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj)) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("different issuer and owner, issuer is container node in current epoch, verify issuer enabled", func(t *testing.T) { - t.Parallel() - - tok := sessiontest.Object() - fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey()) - tok.SetID(uuid.New()) - tok.SetAuthKey(&fsPubKey) - tok.SetExp(100500) - tok.SetIat(1) - tok.SetNbf(1) - require.NoError(t, tok.Sign(signer.PrivateKey)) - - cnrID := cidtest.ID() - cont := containerSDK.Container{} - cont.Init() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - cont.SetPlacementPolicy(pp) - - var node netmap.NodeInfo - node.SetPublicKey(signer.PublicKey().Bytes()) - currentEpochNM := &netmap.NetMap{} - currentEpochNM.SetEpoch(curEpoch) - currentEpochNM.SetNodes([]netmap.NodeInfo{node}) - - obj := objectSDK.New() - obj.SetContainerID(cnrID) - obj.SetSessionToken(tok) - obj.SetOwnerID(owner) - require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj)) - - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - WithLockSource(ls), - WithVerifySessionTokenIssuer(true), - WithInnerRing(&testIRSource{ - irNodes: [][]byte{}, - }), - WithContainersSource( - &testContainerSource{ - containers: map[cid.ID]*container.Container{ - cnrID: { - Value: cont, - }, - }, - }, - ), - WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - curEpoch: currentEpochNM, - }, - CurrentEpoch: curEpoch, - }, - ), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("different issuer and owner, issuer is container node in previous epoch, verify issuer enabled", func(t *testing.T) { - t.Parallel() - - tok := sessiontest.Object() - fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey()) - tok.SetID(uuid.New()) - tok.SetAuthKey(&fsPubKey) - tok.SetExp(100500) - tok.SetIat(1) - tok.SetNbf(1) - require.NoError(t, tok.Sign(signer.PrivateKey)) - - cnrID := cidtest.ID() - cont := containerSDK.Container{} - cont.Init() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - cont.SetPlacementPolicy(pp) - - var issuerNode netmap.NodeInfo - issuerNode.SetPublicKey(signer.PublicKey().Bytes()) - - var nonIssuerNode netmap.NodeInfo - nonIssuerKey, err := keys.NewPrivateKey() - require.NoError(t, err) - nonIssuerNode.SetPublicKey(nonIssuerKey.PublicKey().Bytes()) - - currentEpochNM := &netmap.NetMap{} - currentEpochNM.SetEpoch(curEpoch) - currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode}) - - previousEpochNM := &netmap.NetMap{} - previousEpochNM.SetEpoch(curEpoch - 1) - previousEpochNM.SetNodes([]netmap.NodeInfo{issuerNode}) - - obj := objectSDK.New() - obj.SetContainerID(cnrID) - obj.SetSessionToken(tok) - obj.SetOwnerID(owner) - require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj)) - - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - 
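			// the netmap source below serves both the current and the previous
			// epoch, so the issuer is found among last epoch's container nodes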
WithLockSource(ls), - WithVerifySessionTokenIssuer(true), - WithInnerRing(&testIRSource{ - irNodes: [][]byte{}, - }), - WithContainersSource( - &testContainerSource{ - containers: map[cid.ID]*container.Container{ - cnrID: { - Value: cont, - }, - }, - }, - ), - WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - curEpoch: currentEpochNM, - curEpoch - 1: previousEpochNM, - }, - CurrentEpoch: curEpoch, - }, - ), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - require.NoError(t, v.Validate(context.Background(), obj, false)) - }) - - t.Run("different issuer and owner, issuer is unknown, verify issuer enabled", func(t *testing.T) { - t.Parallel() - - tok := sessiontest.Object() - fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey()) - tok.SetID(uuid.New()) - tok.SetAuthKey(&fsPubKey) - tok.SetExp(100500) - tok.SetIat(1) - tok.SetNbf(1) - require.NoError(t, tok.Sign(signer.PrivateKey)) - - cnrID := cidtest.ID() - cont := containerSDK.Container{} - cont.Init() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - cont.SetPlacementPolicy(pp) - - var nonIssuerNode1 netmap.NodeInfo - nonIssuerKey1, err := keys.NewPrivateKey() - require.NoError(t, err) - nonIssuerNode1.SetPublicKey(nonIssuerKey1.PublicKey().Bytes()) - - var nonIssuerNode2 netmap.NodeInfo - nonIssuerKey2, err := keys.NewPrivateKey() - require.NoError(t, err) - nonIssuerNode2.SetPublicKey(nonIssuerKey2.PublicKey().Bytes()) - - currentEpochNM := &netmap.NetMap{} - currentEpochNM.SetEpoch(curEpoch) - currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode1}) - - previousEpochNM := &netmap.NetMap{} - previousEpochNM.SetEpoch(curEpoch - 1) - previousEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode2}) - - obj := objectSDK.New() - obj.SetContainerID(cnrID) - obj.SetSessionToken(tok) - obj.SetOwnerID(owner) - require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj)) - - v := NewFormatValidator( - WithNetState(testNetState{ - epoch: curEpoch, - }), - WithLockSource(ls), - WithVerifySessionTokenIssuer(true), - WithInnerRing(&testIRSource{ - irNodes: [][]byte{}, - }), - WithContainersSource( - &testContainerSource{ - containers: map[cid.ID]*container.Container{ - cnrID: { - Value: cont, - }, - }, - }, - ), - WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - curEpoch: currentEpochNM, - curEpoch - 1: previousEpochNM, - }, - CurrentEpoch: curEpoch, - }, - ), - WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), - ) - - require.Error(t, v.Validate(context.Background(), obj, false)) - }) -} - -type testIRSource struct { - irNodes [][]byte -} - -func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { - return s.irNodes, nil -} - -type testContainerSource struct { - containers map[cid.ID]*container.Container -} - -func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { - if cnr, found := s.containers[cnrID]; found { - return cnr, nil - } - return nil, fmt.Errorf("container not found") -} - -func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { - return nil, nil -} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go deleted file mode 100644 index aab12ebf9..000000000 --- a/pkg/core/object/info.go +++ /dev/null @@ -1,34 +0,0 @@ -package object - -import ( - "fmt" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type ECInfo struct { - ParentID oid.ID - Index uint32 - Total uint32 -} - -func (v *ECInfo) String() string { - if v == nil { - return "" - } - return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) -} - -// Info groups object address with its FrostFS -// object info. -type Info struct { - Address oid.Address - Type objectSDK.Type - IsLinkingObject bool - ECInfo *ECInfo -} - -func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) -} diff --git a/pkg/core/object/object.go b/pkg/core/object/object.go deleted file mode 100644 index 9c450966c..000000000 --- a/pkg/core/object/object.go +++ /dev/null @@ -1,23 +0,0 @@ -package object - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// AddressOf returns the address of the object. -func AddressOf(obj *objectSDK.Object) oid.Address { - var addr oid.Address - - id, ok := obj.ID() - if ok { - addr.SetObject(id) - } - - cnr, ok := obj.ContainerID() - if ok { - addr.SetContainer(cnr) - } - - return addr -} diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go deleted file mode 100644 index 3733ed507..000000000 --- a/pkg/core/object/sender_classifier.go +++ /dev/null @@ -1,164 +0,0 @@ -package object - -import ( - "bytes" - "context" - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" -) - -type InnerRing interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) -} - -type SenderClassifier struct { - log *logger.Logger - innerRing InnerRing - netmap core.Source -} - -func NewSenderClassifier(innerRing InnerRing, netmap core.Source, log *logger.Logger) SenderClassifier { - return SenderClassifier{ - log: log, - innerRing: innerRing, - netmap: netmap, - } -} - -type ClassifyResult struct { - Role acl.Role - Key []byte -} - -func (c SenderClassifier) Classify( - ctx context.Context, - ownerID *user.ID, - ownerKey *keys.PublicKey, - idCnr cid.ID, - cnr container.Container, -) (res *ClassifyResult, err error) { - ownerKeyInBytes := ownerKey.Bytes() - - // TODO: #767 get owner from frostfs.id if present - - // if request owner is the same as container owner, return RoleUser - if ownerID.Equals(cnr.Owner()) { - return &ClassifyResult{ - Role: acl.RoleOwner, - Key: ownerKeyInBytes, - }, nil - } - - return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr) -} - -func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { - isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) - if err != nil { - // do not throw error, try best case matching - c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, - zap.Error(err)) - } else if isInnerRingNode { - return &ClassifyResult{ - Role: 
acl.RoleInnerRing,
-			Key:  ownerKeyInBytes,
-		}, nil
-	}
-
-	binCnr := make([]byte, sha256.Size)
-	idCnr.Encode(binCnr)
-
-	isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
-	if err != nil {
-		// an error might happen if the request has a `RoleOther` key and placement
-		// is not possible for the previous epoch, so
-		// do not throw an error, try best-case matching
-		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
-			zap.Error(err))
-	} else if isContainerNode {
-		return &ClassifyResult{
-			Role: acl.RoleContainer,
-			Key:  ownerKeyInBytes,
-		}, nil
-	}
-
-	// if none of the above, return RoleOthers
-	return &ClassifyResult{
-		Role: acl.RoleOthers,
-		Key:  ownerKeyInBytes,
-	}, nil
-}
-
-func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
-	innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
-	if err != nil {
-		return false, err
-	}
-
-	// if the request owner key is in the inner ring list, the owner is an inner ring node
-	for i := range innerRingKeys {
-		if bytes.Equal(innerRingKeys[i], owner) {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
-func (c SenderClassifier) isContainerKey(
-	ctx context.Context,
-	owner, idCnr []byte,
-	cnr container.Container,
-) (bool, error) {
-	nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check the current netmap
-	if err != nil {
-		return false, err
-	}
-
-	in, err := LookupKeyInContainer(nm, owner, idCnr, cnr)
-	if err != nil {
-		return false, err
-	} else if in {
-		return true, nil
-	}
-
-	// then check the previous netmap; this can happen in between an epoch change,
-	// when a node migrates data from the last epoch's container
-	nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
-	if err != nil {
-		return false, err
-	}
-
-	return LookupKeyInContainer(nm, owner, idCnr, cnr)
-}
-
-func LookupKeyInContainer(
-	nm *netmap.NetMap,
-	pkey, idCnr []byte,
-	cnr container.Container,
-) (bool, error) {
-	cnrVectors, err := nm.ContainerNodes(cnr.PlacementPolicy(), idCnr)
-	if err != nil {
-		return false, err
-	}
-
-	for i := range cnrVectors {
-		for j := range cnrVectors[i] {
-			if bytes.Equal(cnrVectors[i][j].PublicKey(), pkey) {
-				return true, nil
-			}
-		}
-	}
-
-	return false, nil
-}
diff --git a/pkg/core/policy/ec.go b/pkg/core/policy/ec.go
deleted file mode 100644
index 846af775a..000000000
--- a/pkg/core/policy/ec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package policy
-
-import (
-	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// IsECPlacement returns true if the policy is an erasure coding policy.
-func IsECPlacement(policy netmapSDK.PlacementPolicy) bool {
-	return policy.NumberOfReplicas() == 1 && policy.ReplicaDescriptor(0).GetECDataCount() > 0
-}
-
-// ECDataCount returns the EC data count for an EC placement policy.
-func ECDataCount(policy netmapSDK.PlacementPolicy) int {
-	return int(policy.ReplicaDescriptor(0).GetECDataCount())
-}
-
-// ECParityCount returns the EC parity count for an EC placement policy.
-func ECParityCount(policy netmapSDK.PlacementPolicy) int {
-	return int(policy.ReplicaDescriptor(0).GetECParityCount())
-}
diff --git a/pkg/core/version/version.go b/pkg/core/version/version.go
deleted file mode 100644
index eb759a993..000000000
--- a/pkg/core/version/version.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package version
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-)
-
-// IsValid checks if Version is not earlier than the genesis version of FrostFS.
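// In the current implementation the genesis version is v2.7.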
-func IsValid(v version.Version) bool { - const ( - startMajor = 2 - startMinor = 7 - ) - - mjr := v.Major() - - return mjr > startMajor || mjr == startMajor && v.Minor() >= startMinor -} diff --git a/pkg/core/version/version_test.go b/pkg/core/version/version_test.go deleted file mode 100644 index 1ef18c521..000000000 --- a/pkg/core/version/version_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package version_test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" - versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/stretchr/testify/require" -) - -func TestIsValid(t *testing.T) { - require.True(t, version.IsValid(versionSDK.Current())) - - var v versionSDK.Version - - for _, item := range []struct { - mjr, mnr uint32 - valid bool - }{ - {mjr: 0, mnr: 0, valid: false}, - {mjr: 2, mnr: 6, valid: false}, - {mjr: 2, mnr: 7, valid: true}, - {mjr: 3, mnr: 0, valid: true}, - } { - v.SetMajor(item.mjr) - v.SetMinor(item.mnr) - - require.Equal(t, item.valid, version.IsValid(v), item) - } -} diff --git a/pkg/innerring/alphabet.go b/pkg/innerring/alphabet.go deleted file mode 100644 index ddb344403..000000000 --- a/pkg/innerring/alphabet.go +++ /dev/null @@ -1,135 +0,0 @@ -package innerring - -import "github.com/nspcc-dev/neo-go/pkg/util" - -type GlagoliticLetter int8 - -const ( - _ GlagoliticLetter = iota - 1 - - az - buky - vedi - glagoli - dobro - yest - zhivete - dzelo - zemlja - izhe - izhei - gerv - kako - ljudi - mislete - nash - on - pokoj - rtsi - slovo - tverdo - uk - fert - kher - oht - shta - tsi - cherv - sha - yer - yeri - yerj - yat - jo - yu - smallYus - smallIotatedYus - bigYus - bigIotatedYus - fita - izhitsa - - lastLetterNum -) - -var glagolicLetterToString = map[GlagoliticLetter]string{ - az: "az", - buky: "buky", - vedi: "vedi", - glagoli: "glagoli", - dobro: "dobro", - yest: "yest", - zhivete: "zhivete", - dzelo: "dzelo", - zemlja: "zemlja", - izhe: "izhe", - izhei: "izhei", - gerv: "gerv", - kako: "kako", - ljudi: "ljudi", - mislete: "mislete", - nash: "nash", - on: "on", - pokoj: "pokoj", - rtsi: "rtsi", - slovo: "slovo", - tverdo: "tverdo", - uk: "uk", - fert: "fert", - kher: "kher", - oht: "oht", - shta: "shta", - tsi: "tsi", - cherv: "cherv", - sha: "sha", - yer: "yer", - yeri: "yeri", - yerj: "yerj", - yat: "yat", - jo: "jo", - yu: "yu", - smallYus: "small.yus", - smallIotatedYus: "small.iotated.yus", - bigYus: "big.yus", - bigIotatedYus: "big.iotated.yus", - fita: "fita", - izhitsa: "izhitsa", -} - -// String returns l in config-compatible format. 
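// Letters without a configured name render as "unknown".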
-func (l GlagoliticLetter) String() string {
-	if str, found := glagolicLetterToString[l]; found {
-		return str
-	}
-	return "unknown"
-}
-
-type AlphabetContracts map[GlagoliticLetter]util.Uint160
-
-func NewAlphabetContracts() AlphabetContracts {
-	return make(map[GlagoliticLetter]util.Uint160, lastLetterNum)
-}
-
-func (a AlphabetContracts) GetByIndex(ind int) (util.Uint160, bool) {
-	if ind < 0 || ind >= int(lastLetterNum) {
-		return util.Uint160{}, false
-	}
-
-	contract, ok := a[GlagoliticLetter(ind)]
-
-	return contract, ok
-}
-
-// indexOutOfRange reports whether ind is negative or beyond the registered contracts.
-func (a AlphabetContracts) indexOutOfRange(ind int) bool {
-	return ind < 0 || ind >= len(a)
-}
-
-func (a AlphabetContracts) iterate(f func(GlagoliticLetter, util.Uint160)) {
-	for letter, contract := range a {
-		f(letter, contract)
-	}
-}
-
-func (a *AlphabetContracts) set(l GlagoliticLetter, h util.Uint160) {
-	(*a)[l] = h
-}
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
deleted file mode 100644
index dfada764a..000000000
--- a/pkg/innerring/bindings.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package innerring
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-)
-
-type (
-	// ContractProcessor interface defines functions for binding event producers
-	// such as event.Listener and Timers with a contract processor.
-	ContractProcessor interface {
-		ListenerNotificationHandlers() []event.NotificationHandlerInfo
-		ListenerNotaryParsers() []event.NotaryParserInfo
-		ListenerNotaryHandlers() []event.NotaryHandlerInfo
-	}
-)
-
-func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
-	// register notification handlers
-	for _, handler := range p.ListenerNotificationHandlers() {
-		l.RegisterNotificationHandler(handler)
-	}
-
-	// register notary parsers
-	for _, notaryParser := range p.ListenerNotaryParsers() {
-		l.SetNotaryParser(notaryParser)
-	}
-
-	// register notary handlers
-	for _, notaryHandler := range p.ListenerNotaryHandlers() {
-		l.RegisterNotaryHandler(notaryHandler)
-	}
-}
-
-// bindMorphProcessor connects morph chain listener handlers.
-func bindMorphProcessor(proc ContractProcessor, s *Server) error {
-	connectListenerWithProcessor(s.morphListener, proc)
-	return nil
-}
-
-// bindMainnetProcessor connects mainnet chain listener handlers.
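// It mirrors bindMorphProcessor, but attaches the processor to the mainnet listener.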
-func bindMainnetProcessor(proc ContractProcessor, s *Server) error { - connectListenerWithProcessor(s.mainnetListener, proc) - return nil -} diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go deleted file mode 100644 index 3f9d8df5f..000000000 --- a/pkg/innerring/blocktimer.go +++ /dev/null @@ -1,76 +0,0 @@ -package innerring - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" - timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type ( - epochState interface { - EpochCounter() uint64 - EpochDuration() uint64 - } - - newEpochHandler func() - - epochTimerArgs struct { - newEpochHandlers []newEpochHandler - - epoch epochState // to specify which epoch to stop, and epoch duration - } - - emitTimerArgs struct { - ap *alphabet.Processor // to handle new emission tick - - emitDuration uint32 // in blocks - } - - depositor func(context.Context) (util.Uint256, error) - awaiter func(context.Context, util.Uint256) error -) - -func (s *Server) addBlockTimer(t *timer.BlockTimer) { - s.blockTimers = append(s.blockTimers, t) -} - -func (s *Server) startBlockTimers() error { - for i := range s.blockTimers { - if err := s.blockTimers[i].Reset(); err != nil { - return err - } - } - - return nil -} - -func (s *Server) tickTimers(h uint32) { - for i := range s.blockTimers { - s.blockTimers[i].Tick(h) - } -} - -func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { - return timer.NewBlockTimer( - func() (uint32, error) { - return uint32(args.epoch.EpochDuration()), nil - }, - func() { - for _, handler := range args.newEpochHandlers { - handler() - } - }, - ) -} - -func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer { - return timer.NewBlockTimer( - timer.StaticBlockMeter(args.emitDuration), - func() { - args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{}) - }, - ) -} diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go deleted file mode 100644 index 4cbe7e394..000000000 --- a/pkg/innerring/blocktimer_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package innerring - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestEpochTimer(t *testing.T) { - t.Parallel() - neh := &testNewEpochHandler{} - epochState := &testEpochState{ - counter: 99, - duration: 10, - } - - args := &epochTimerArgs{ - newEpochHandlers: []newEpochHandler{neh.Handle}, - epoch: epochState, - } - et := newEpochTimer(args) - err := et.Reset() - require.NoError(t, err, "failed to reset timer") - - et.Tick(100) - require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - - et.Tick(101) - require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - - et.Tick(102) - require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - - et.Tick(103) - require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - - var h uint32 - for h = 104; h < 109; h++ { - et.Tick(h) - require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - } - - et.Tick(109) - require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - - et.Tick(110) - require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - - et.Tick(111) - require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - - et.Tick(112) - require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - - et.Tick(113) - require.Equal(t, 1, 
neh.called, "invalid new epoch handler calls") - - for h = 114; h < 119; h++ { - et.Tick(h) - require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - } - et.Tick(120) - require.Equal(t, 2, neh.called, "invalid new epoch handler calls") -} - -type testNewEpochHandler struct { - called int -} - -func (h *testNewEpochHandler) Handle() { - h.called++ -} - -type testEpochState struct { - counter uint64 - duration uint64 -} - -func (s *testEpochState) EpochCounter() uint64 { - return s.counter -} - -func (s *testEpochState) EpochDuration() uint64 { - return s.duration -} diff --git a/pkg/innerring/config/fee.go b/pkg/innerring/config/fee.go deleted file mode 100644 index a26a7bcc6..000000000 --- a/pkg/innerring/config/fee.go +++ /dev/null @@ -1,29 +0,0 @@ -package config - -import ( - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/spf13/viper" -) - -// FeeConfig is an instance that returns extra fee values for contract -// invocations without notary support. -type FeeConfig struct { - mainchain, - sidechain fixedn.Fixed8 -} - -// NewFeeConfig constructs FeeConfig from viper.Viper instance. Latter must not be nil. -func NewFeeConfig(v *viper.Viper) *FeeConfig { - return &FeeConfig{ - mainchain: fixedn.Fixed8(v.GetInt64("fee.main_chain")), - sidechain: fixedn.Fixed8(v.GetInt64("fee.side_chain")), - } -} - -func (f FeeConfig) MainChainFee() fixedn.Fixed8 { - return f.mainchain -} - -func (f FeeConfig) SideChainFee() fixedn.Fixed8 { - return f.sidechain -} diff --git a/pkg/innerring/config/fee_test.go b/pkg/innerring/config/fee_test.go deleted file mode 100644 index ced21b238..000000000 --- a/pkg/innerring/config/fee_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package config - -import ( - "strings" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" -) - -func TestConfig(t *testing.T) { - t.Parallel() - t.Run("all set", func(t *testing.T) { - t.Parallel() - file := strings.NewReader( - ` -fee: - main_chain: 50000000 - side_chain: 200000000 -`, - ) - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - config := NewFeeConfig(v) - require.Equal(t, fixedn.Fixed8(50000000), config.MainChainFee(), "main chain fee invalid") - require.Equal(t, fixedn.Fixed8(200000000), config.SideChainFee(), "side chain fee invalid") - }) - - t.Run("nothing set", func(t *testing.T) { - t.Parallel() - file := strings.NewReader("") - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - config := NewFeeConfig(v) - require.Equal(t, fixedn.Fixed8(0), config.MainChainFee(), "main chain fee invalid") - require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid") - }) - - t.Run("partially set", func(t *testing.T) { - t.Parallel() - file := strings.NewReader( - ` -fee: - main_chain: 10 -`, - ) - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - config := NewFeeConfig(v) - require.Equal(t, fixedn.Fixed8(10), config.MainChainFee(), "main chain fee invalid") - require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid") - }) -} diff --git a/pkg/innerring/contracts.go b/pkg/innerring/contracts.go deleted file mode 100644 index 4a80296f4..000000000 --- a/pkg/innerring/contracts.go +++ /dev/null @@ -1,127 +0,0 @@ -package innerring - -import ( - "errors" - "fmt" - 
"strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/viper" -) - -type contracts struct { - frostfs util.Uint160 // in mainnet - netmap util.Uint160 // in morph - balance util.Uint160 // in morph - container util.Uint160 // in morph - proxy util.Uint160 // in morph - processing util.Uint160 // in mainnet - frostfsID util.Uint160 // in morph - - alphabet AlphabetContracts // in morph -} - -func parseContracts(cfg *viper.Viper, morph nnsResolver, withoutMainNet, withoutMainNotary bool) (*contracts, error) { - var ( - result = new(contracts) - err error - ) - - if !withoutMainNet { - result.frostfs, err = util.Uint160DecodeStringLE(cfg.GetString("contracts.frostfs")) - if err != nil { - return nil, fmt.Errorf("can't get frostfs script hash: %w", err) - } - - if !withoutMainNotary { - result.processing, err = util.Uint160DecodeStringLE(cfg.GetString("contracts.processing")) - if err != nil { - return nil, fmt.Errorf("can't get processing script hash: %w", err) - } - } - } - - result.proxy, err = parseContract(cfg, morph, "contracts.proxy", client.NNSProxyContractName) - if err != nil { - return nil, fmt.Errorf("can't get proxy script hash: %w", err) - } - - targets := [...]struct { - cfgName string - nnsName string - dest *util.Uint160 - }{ - {"contracts.netmap", client.NNSNetmapContractName, &result.netmap}, - {"contracts.balance", client.NNSBalanceContractName, &result.balance}, - {"contracts.container", client.NNSContainerContractName, &result.container}, - {"contracts.frostfsid", client.NNSFrostFSIDContractName, &result.frostfsID}, - } - - for _, t := range targets { - *t.dest, err = parseContract(cfg, morph, t.cfgName, t.nnsName) - if err != nil { - name := strings.TrimPrefix(t.cfgName, "contracts.") - return nil, fmt.Errorf("can't get %s script hash: %w", name, err) - } - } - - result.alphabet, err = parseAlphabetContracts(cfg, morph) - if err != nil { - return nil, err - } - - return result, nil -} - -func parseAlphabetContracts(cfg *viper.Viper, morph nnsResolver) (AlphabetContracts, error) { - num := GlagoliticLetter(cfg.GetUint("contracts.alphabet.amount")) - alpha := NewAlphabetContracts() - - if num > lastLetterNum { - return nil, fmt.Errorf("amount of alphabet contracts overflows glagolitsa %d > %d", num, lastLetterNum) - } - - thresholdIsSet := num != 0 - - if !thresholdIsSet { - // try to read maximum alphabet contracts - // if threshold has not been set manually - num = lastLetterNum - } - - for letter := az; letter < num; letter++ { - contractHash, err := parseContract(cfg, morph, - "contracts.alphabet."+letter.String(), - client.NNSAlphabetContractName(int(letter)), - ) - if err != nil { - if errors.Is(err, client.ErrNNSRecordNotFound) { - break - } - - return nil, fmt.Errorf("invalid alphabet %s contract: %w", letter, err) - } - - alpha.set(letter, contractHash) - } - - if thresholdIsSet && len(alpha) != int(num) { - return nil, fmt.Errorf("could not read all contracts: required %d, read %d", num, len(alpha)) - } - - return alpha, nil -} - -func parseContract(cfg *viper.Viper, morph nnsResolver, cfgName, nnsName string) (res util.Uint160, err error) { - contractStr := cfg.GetString(cfgName) - if len(contractStr) == 0 { - return morph.NNSContractAddress(nnsName) - } - - return util.Uint160DecodeStringLE(contractStr) -} - -type nnsResolver interface { - NNSContractAddress(name string) (sh util.Uint160, err error) -} diff --git a/pkg/innerring/contracts_test.go b/pkg/innerring/contracts_test.go 
deleted file mode 100644 index 0fb0442b2..000000000 --- a/pkg/innerring/contracts_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package innerring - -import ( - "strings" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" -) - -func TestParseContractsSuccess(t *testing.T) { - t.Parallel() - file := strings.NewReader(` -contracts: - frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 - processing: 597f5894867113a41e192801709c02497f611de8 - balance: d2aa48d14b17b11bc4c68205027884a96706dd16 - container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 - frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a - netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 - proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c - alphabet: - amount: 2 - az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea - buky: e2ba789320899658b100f331bdebb74474757920 -`) - - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - t.Run("all enabled", func(t *testing.T) { - t.Parallel() - c, err := parseContracts(v, nil, false, false) - require.NoError(t, err, "failed to parse contracts") - - frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62") - require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs") - - processingExp, _ := util.Uint160DecodeStringLE("597f5894867113a41e192801709c02497f611de8") - require.Equal(t, processingExp, c.processing, "invalid processing") - - balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") - require.Equal(t, balanceExp, c.balance, "invalid balance") - - containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") - require.Equal(t, containerExp, c.container, "invalid container") - - frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") - require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") - - netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") - require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") - - proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c") - require.Equal(t, proxyExp, c.proxy, "invalid proxy") - - require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") - - azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") - require.Equal(t, azExp, c.alphabet[az], "invalid az") - - bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") - require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky") - }) - - t.Run("all disabled", func(t *testing.T) { - t.Parallel() - c, err := parseContracts(v, nil, true, true) - require.NoError(t, err, "failed to parse contracts") - - require.Equal(t, util.Uint160{}, c.frostfs, "invalid frostfs") - - require.Equal(t, util.Uint160{}, c.processing, "invalid processing") - - balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") - require.Equal(t, balanceExp, c.balance, "invalid balance") - - containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") - require.Equal(t, containerExp, c.container, "invalid container") - - frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") - require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") - - netmapIDExp, _ := 
util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") - require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") - - proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c") - require.Equal(t, proxyExp, c.proxy, "invalid proxy") - - require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") - - azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") - require.Equal(t, azExp, c.alphabet[az], "invalid az") - - bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") - require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky") - }) - - t.Run("main notary disabled", func(t *testing.T) { - t.Parallel() - c, err := parseContracts(v, nil, false, true) - require.NoError(t, err, "failed to parse contracts") - - frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62") - require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs") - - require.Equal(t, util.Uint160{}, c.processing, "invalid processing") - - balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") - require.Equal(t, balanceExp, c.balance, "invalid balance") - - containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") - require.Equal(t, containerExp, c.container, "invalid container") - - frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") - require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") - - netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") - require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") - - proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c") - require.Equal(t, proxyExp, c.proxy, "invalid proxy") - - require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") - - azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") - require.Equal(t, azExp, c.alphabet[az], "invalid az") - - bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") - require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky") - }) -} - -func TestParseContractsInvalid(t *testing.T) { - t.Parallel() - t.Run("invalid frostfs contract", func(t *testing.T) { - t.Parallel() - file := strings.NewReader(` -contracts: - frostfs: invalid_data - processing: 597f5894867113a41e192801709c02497f611de8 - balance: d2aa48d14b17b11bc4c68205027884a96706dd16 - container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 - frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a - netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 - proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c - alphabet: - amount: 2 - az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea - buky: e2ba789320899658b100f331bdebb74474757920 -`) - - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - _, err = parseContracts(v, nil, false, false) - require.Error(t, err, "unexpected success") - }) - - t.Run("invalid alphabet count", func(t *testing.T) { - t.Parallel() - file := strings.NewReader(` -contracts: - frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 - processing: 597f5894867113a41e192801709c02497f611de8 - balance: d2aa48d14b17b11bc4c68205027884a96706dd16 - container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 - frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a - netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 - proxy: 
abc8794bb40a21f2db5f21ae62741eb46c8cad1c - alphabet: - amount: 3 - az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea - buky: e2ba789320899658b100f331bdebb74474757920 -`) - - v := viper.New() - v.SetConfigType("yaml") - err := v.ReadConfig(file) - require.NoError(t, err, "read config file failed") - - azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") - bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") - - morph := &testParserMorph{ - values: map[string]util.Uint160{ - "az": azExp, - "buky": bukyExp, - }, - } - - _, err = parseContracts(v, morph, false, false) - require.ErrorContains(t, err, "could not read all contracts: required 3, read 2", "unexpected success") - }) -} - -type testParserMorph struct { - values map[string]util.Uint160 -} - -func (m *testParserMorph) NNSContractAddress(name string) (sh util.Uint160, err error) { - if value, found := m.values[name]; found { - return value, nil - } - return util.Uint160{}, client.ErrNNSRecordNotFound -} diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go deleted file mode 100644 index 7deec3f31..000000000 --- a/pkg/innerring/fetcher.go +++ /dev/null @@ -1,60 +0,0 @@ -package innerring - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// NewIRFetcherWithNotary creates IrFetcherWithNotary. -// -// IrFetcherWithNotary can be used to obtain innerring key list if -// network that client is connected to supports notary contract. -// -// Passed client is required. Panics if nil. -func NewIRFetcherWithNotary(cli *client.Client) *IrFetcherWithNotary { - if cli == nil { - panic("could not init IRFetcher with notary: client must not be nil") - } - return &IrFetcherWithNotary{cli: cli} -} - -// NewIRFetcherWithoutNotary creates IrFetcherWithoutNotary. -// -// IrFetcherWithoutNotary must be used to obtain innerring key list if -// network that netmap wrapper is connected to does not support notary -// contract. -// -// Passed netmap wrapper is required. Panics if nil. -func NewIRFetcherWithoutNotary(nm *nmClient.Client) *IrFetcherWithoutNotary { - if nm == nil { - panic("could not init IRFetcher without notary: netmap wrapper must not be nil") - } - return &IrFetcherWithoutNotary{nm: nm} -} - -// IrFetcherWithNotary fetches keys using notary contract. Must be created -// with NewIRFetcherWithNotary. -type IrFetcherWithNotary struct { - cli *client.Client -} - -// IrFetcherWithoutNotary fetches keys using netmap contract. Must be created -// with NewIRFetcherWithoutNotary. -type IrFetcherWithoutNotary struct { - nm *nmClient.Client -} - -// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet -// role in the sidechain. -func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { - return fN.cli.NeoFSAlphabetList(ctx) -} - -// InnerRingKeys fetches list of innerring keys from netmap contract -// in the sidechain. 
-func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { - return f.nm.GetInnerRingList(ctx) -} diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go deleted file mode 100644 index 439400bac..000000000 --- a/pkg/innerring/indexer.go +++ /dev/null @@ -1,121 +0,0 @@ -package innerring - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type ( - irFetcher interface { - InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) - } - - committeeFetcher interface { - Committee() (keys.PublicKeys, error) - } - - innerRingIndexer struct { - sync.RWMutex - - irFetcher irFetcher - commFetcher committeeFetcher - key *keys.PublicKey - timeout time.Duration - - ind indexes - - lastAccess time.Time - } - - indexes struct { - innerRingIndex, innerRingSize int32 - alphabetIndex int32 - } -) - -func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicKey, to time.Duration) *innerRingIndexer { - return &innerRingIndexer{ - irFetcher: irf, - commFetcher: comf, - key: key, - timeout: to, - } -} - -func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { - s.RLock() - - if time.Since(s.lastAccess) < s.timeout { - s.RUnlock() - return s.ind, nil - } - - s.RUnlock() - - s.Lock() - defer s.Unlock() - - if time.Since(s.lastAccess) < s.timeout { - return s.ind, nil - } - - innerRing, err := s.irFetcher.InnerRingKeys(ctx) - if err != nil { - return indexes{}, err - } - - s.ind.innerRingIndex = keyPosition(s.key, innerRing) - s.ind.innerRingSize = int32(len(innerRing)) - - alphabet, err := s.commFetcher.Committee() - if err != nil { - return indexes{}, err - } - - s.ind.alphabetIndex = keyPosition(s.key, alphabet) - s.lastAccess = time.Now() - - return s.ind, nil -} - -func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) - if err != nil { - return 0, fmt.Errorf("can't update index state: %w", err) - } - - return ind.innerRingIndex, nil -} - -func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) - if err != nil { - return 0, fmt.Errorf("can't update index state: %w", err) - } - - return ind.innerRingSize, nil -} - -func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) - if err != nil { - return 0, fmt.Errorf("can't update index state: %w", err) - } - - return ind.alphabetIndex, nil -} - -// keyPosition returns "-1" if key is not found in the list, otherwise returns -// index of the key. 
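// Editor's sketch (illustration only): this mirrors how innerRingIndexer.update
// derives its cached values from keyPosition; key and irKeys are assumed to
// come from the node key and the inner ring fetcher:
//
//	idx := keyPosition(key, irKeys) // -1 when the key is not in the list
//	size := int32(len(irKeys))      // stored as innerRingSize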
-func keyPosition(key *keys.PublicKey, list keys.PublicKeys) int32 { - for i := range list { - if key.Equal(list[i]) { - return int32(i) - } - } - return -1 -} diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go deleted file mode 100644 index f8201b7df..000000000 --- a/pkg/innerring/indexer_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package innerring - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestIndexerReturnsIndexes(t *testing.T) { - t.Parallel() - commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{ - "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae", - "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131", - }) - require.NoError(t, err, "convert string to commitee public keys failed") - cf := &testCommiteeFetcher{ - keys: commiteeKeys, - } - - irKeys, err := keys.NewPublicKeysFromStrings([]string{ - "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35", - "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3", - "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131", - }) - require.NoError(t, err, "convert string to IR public keys failed") - irf := &testIRFetcher{ - keys: irKeys, - } - - t.Run("success", func(t *testing.T) { - t.Parallel() - key := irKeys[2] - - indexer := newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err := indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(1), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(2), idx, "invalid IR index") - - size, err := indexer.InnerRingSize(context.Background()) - require.NoError(t, err, "failed to get IR size") - require.Equal(t, int32(3), size, "invalid IR size") - }) - - t.Run("not found alphabet", func(t *testing.T) { - t.Parallel() - key := irKeys[0] - - indexer := newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err := indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(-1), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(0), idx, "invalid IR index") - }) - - t.Run("not found IR", func(t *testing.T) { - t.Parallel() - key := commiteeKeys[0] - - indexer := newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err := indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(0), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(-1), idx, "invalid IR index") - }) -} - -func TestIndexerCachesIndexes(t *testing.T) { - t.Parallel() - commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{}) - require.NoError(t, err, "convert string to commitee public keys failed") - cf := &testCommiteeFetcher{ - keys: commiteeKeys, - } - - irKeys, err := keys.NewPublicKeysFromStrings([]string{}) - require.NoError(t, err, "convert string to IR public keys failed") - irf := &testIRFetcher{ - keys: irKeys, - } - - key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131") - 
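	// Editor's note (added commentary): the assertions below exercise the
	// double-checked locking in innerRingIndexer.update: within the one-second
	// timeout both fetchers are called exactly once, and the call counters
	// grow only after the timeout expires.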
require.NoError(t, err, "convert string to public key failed") - - indexer := newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err := indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(-1), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(-1), idx, "invalid IR index") - - size, err := indexer.InnerRingSize(context.Background()) - require.NoError(t, err, "failed to get IR size") - require.Equal(t, int32(0), size, "invalid IR size") - - require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") - require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - - idx, err = indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(-1), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(-1), idx, "invalid IR index") - - size, err = indexer.InnerRingSize(context.Background()) - require.NoError(t, err, "failed to get IR size") - require.Equal(t, int32(0), size, "invalid IR size") - - require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") - require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - - time.Sleep(2 * time.Second) - - idx, err = indexer.AlphabetIndex(context.Background()) - require.NoError(t, err, "failed to get alphabet index") - require.Equal(t, int32(-1), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.NoError(t, err, "failed to get IR index") - require.Equal(t, int32(-1), idx, "invalid IR index") - - size, err = indexer.InnerRingSize(context.Background()) - require.NoError(t, err, "failed to get IR size") - require.Equal(t, int32(0), size, "invalid IR size") - - require.Equal(t, int32(2), cf.calls.Load(), "invalid commitee calls count") - require.Equal(t, int32(2), irf.calls.Load(), "invalid IR calls count") -} - -func TestIndexerThrowsErrors(t *testing.T) { - t.Parallel() - cf := &testCommiteeFetcher{ - err: fmt.Errorf("test commitee error"), - } - - irKeys, err := keys.NewPublicKeysFromStrings([]string{}) - require.NoError(t, err, "convert string to IR public keys failed") - irf := &testIRFetcher{ - keys: irKeys, - } - - key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131") - require.NoError(t, err, "convert string to public key failed") - - indexer := newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err := indexer.AlphabetIndex(context.Background()) - require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") - require.Equal(t, int32(0), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") - require.Equal(t, int32(0), idx, "invalid IR index") - - size, err := indexer.InnerRingSize(context.Background()) - require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") - require.Equal(t, int32(0), size, "invalid IR size") - - commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{}) - require.NoError(t, err, "convert string to commitee public keys failed") - cf = &testCommiteeFetcher{ - keys: commiteeKeys, - } - - irf = &testIRFetcher{ - err: 
fmt.Errorf("test IR error"), - } - - indexer = newInnerRingIndexer(cf, irf, key, time.Second) - - idx, err = indexer.AlphabetIndex(context.Background()) - require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") - require.Equal(t, int32(0), idx, "invalid alphabet index") - - idx, err = indexer.InnerRingIndex(context.Background()) - require.ErrorContains(t, err, "test IR error", "error from IR not throwed") - require.Equal(t, int32(0), idx, "invalid IR index") - - size, err = indexer.InnerRingSize(context.Background()) - require.ErrorContains(t, err, "test IR error", "error from IR not throwed") - require.Equal(t, int32(0), size, "invalid IR size") -} - -type testCommiteeFetcher struct { - keys keys.PublicKeys - err error - calls atomic.Int32 -} - -func (f *testCommiteeFetcher) Committee() (keys.PublicKeys, error) { - f.calls.Add(1) - return f.keys, f.err -} - -type testIRFetcher struct { - keys keys.PublicKeys - err error - calls atomic.Int32 -} - -func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { - f.calls.Add(1) - return f.keys, f.err -} - -func BenchmarkKeyPosition(b *testing.B) { - list := make(keys.PublicKeys, 7) - for i := range list { - p, err := keys.NewPrivateKey() - require.NoError(b, err) - list[i] = p.PublicKey() - } - - key := new(keys.PublicKey) - require.NoError(b, key.DecodeBytes(list[5].Bytes())) - - b.ResetTimer() - b.ReportAllocs() - for range b.N { - if keyPosition(key, list) != 5 { - b.FailNow() - } - } -} diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go deleted file mode 100644 index 3d236641e..000000000 --- a/pkg/innerring/initialization.go +++ /dev/null @@ -1,513 +0,0 @@ -package innerring - -import ( - "context" - "encoding/hex" - "fmt" - "net" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance" - cont "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" - nodevalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation" - addrvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress" - statevalidation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - 
"github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/spf13/viper" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, - alphaSync event.Handler, -) error { - locodeValidator := s.newLocodeValidator(cfg) - - netSettings := (*networkSettings)(s.netmapClient) - - var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator - netMapCandidateStateValidator.SetNetworkSettings(netSettings) - - poolSize := cfg.GetInt("workers.netmap") - s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) - - var err error - s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - PoolSize: poolSize, - NetmapClient: netmap.NewNetmapClient(s.netmapClient), - EpochTimer: s, - EpochState: s, - AlphabetState: s, - CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"), - CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"), - NotaryDepositHandler: s.onlyAlphabetEventHandler( - s.notaryHandler, - ), - AlphabetSyncHandler: s.onlyAlphabetEventHandler( - alphaSync, - ), - NodeValidator: nodevalidator.New( - &netMapCandidateStateValidator, - addrvalidator.New(), - locodeValidator, - ), - - NodeStateSettings: netSettings, - }) - if err != nil { - return err - } - - return bindMorphProcessor(s.netmapProcessor, s) -} - -func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *chainParams, errChan chan<- error) error { - s.withoutMainNet = cfg.GetBool("without_mainnet") - if s.withoutMainNet { - // This works as long as event Listener starts listening loop once, - // otherwise Server.Start will run two similar routines. - // This behavior most likely will not change. 
- s.mainnetListener = s.morphListener - s.mainnetClient = s.morphClient - return nil - } - - mainnetChain := morphChain - mainnetChain.name = mainnetPrefix - mainnetChain.sgn = &transaction.Signer{Scopes: transaction.CalledByEntry} - - fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) - if err != nil { - fromMainChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err)) - } - mainnetChain.from = fromMainChainBlock - - // create mainnet client - s.mainnetClient, err = createClient(ctx, mainnetChain, errChan) - if err != nil { - return err - } - - // create mainnet listener - s.mainnetListener, err = createListener(ctx, s.mainnetClient, mainnetChain) - return err -} - -func (s *Server) enableNotarySupport() error { - // enable notary support in the side client - err := s.morphClient.EnableNotarySupport( - client.WithProxyContract(s.contracts.proxy), - ) - if err != nil { - return fmt.Errorf("could not enable side chain notary support: %w", err) - } - - s.morphListener.EnableNotarySupport(s.contracts.proxy, s.morphClient.Committee, s.morphClient) - - if !s.mainNotaryConfig.disabled { - // enable notary support in the main client - err := s.mainnetClient.EnableNotarySupport( - client.WithProxyContract(s.contracts.processing), - client.WithAlphabetSource(s.morphClient.Committee), - ) - if err != nil { - return fmt.Errorf("could not enable main chain notary support: %w", err) - } - } - - return nil -} - -func (s *Server) initNotaryConfig(ctx context.Context) { - s.mainNotaryConfig = notaryConfigs( - !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too - ) - - s.log.Info(ctx, logs.InnerringNotarySupport, - zap.Bool("sidechain_enabled", true), - zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), - ) -} - -func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Client, irf irFetcher) (event.Handler, error) { - var alphaSync event.Handler - - if s.withoutMainNet || cfg.GetBool("governance.disable") { - alphaSync = func(ctx context.Context, _ event.Event) { - s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled) - } - } else { - // create governance processor - governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - FrostFSClient: frostfsCli, - AlphabetState: s, - EpochState: s, - Voter: s, - IRFetcher: irf, - MorphClient: s.morphClient, - MainnetClient: s.mainnetClient, - }) - if err != nil { - return nil, err - } - - alphaSync = governanceProcessor.HandleAlphabetSync - err = bindMainnetProcessor(governanceProcessor, s) - if err != nil { - return nil, err - } - } - - return alphaSync, nil -} - -func (s *Server) createIRFetcher() irFetcher { - var irf irFetcher - - if s.withoutMainNet || !s.mainNotaryConfig.disabled { - // if mainchain is disabled we should use NeoFSAlphabetList client method according to its docs - // (naming `...WithNotary` will not always be correct) - irf = NewIRFetcherWithNotary(s.morphClient) - } else { - irf = NewIRFetcherWithoutNotary(s.netmapClient) - } - - return irf -} - -func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) { - s.epochTimer = newEpochTimer(&epochTimerArgs{ - newEpochHandlers: s.newEpochTickHandlers(ctx), - epoch: s, - }) - - s.addBlockTimer(s.epochTimer) - - // initialize emission timer - emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{ - ap: s.alphabetProcessor, - emitDuration: 
cfg.GetUint32("timers.emit"), - }) - - s.addBlockTimer(emissionTimer) -} - -func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error { - parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) - if err != nil { - return err - } - poolSize := cfg.GetInt("workers.alphabet") - s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize)) - - // create alphabet processor - s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ - ParsedWallets: parsedWallets, - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - PoolSize: poolSize, - AlphabetContracts: s.contracts.alphabet, - NetmapClient: s.netmapClient, - MorphClient: s.morphClient, - IRList: s, - StorageEmission: cfg.GetUint64("emit.storage.amount"), - }) - if err != nil { - return err - } - - err = bindMorphProcessor(s.alphabetProcessor, s) - return err -} - -func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { - poolSize := cfg.GetInt("workers.container") - s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) - // container processor - containerProcessor, err := cont.New(&cont.Params{ - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - PoolSize: poolSize, - AlphabetState: s, - ContainerClient: cnrClient, - MorphClient: cnrClient.Morph(), - FrostFSIDClient: frostfsIDClient, - NetworkState: s.netmapClient, - }) - if err != nil { - return err - } - - return bindMorphProcessor(containerProcessor, s) -} - -func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { - poolSize := cfg.GetInt("workers.balance") - s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) - // create balance processor - balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - PoolSize: poolSize, - FrostFSClient: frostfsCli, - BalanceSC: s.contracts.balance, - AlphabetState: s, - Converter: &s.precision, - }) - if err != nil { - return err - } - - return bindMorphProcessor(balanceProcessor, s) -} - -func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error { - if s.withoutMainNet { - return nil - } - poolSize := cfg.GetInt("workers.frostfs") - s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) - - frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log.WithTag(logger.TagProcessor), - Metrics: s.irMetrics, - PoolSize: poolSize, - FrostFSContract: s.contracts.frostfs, - BalanceClient: s.balanceClient, - NetmapClient: s.netmapClient, - MorphClient: s.morphClient, - EpochState: s, - AlphabetState: s, - Converter: &s.precision, - MintEmitCacheSize: cfg.GetInt("emit.mint.cache_size"), - MintEmitThreshold: cfg.GetUint64("emit.mint.threshold"), - MintEmitValue: fixedn.Fixed8(cfg.GetInt64("emit.mint.value")), - GasBalanceThreshold: cfg.GetInt64("emit.gas.balance_threshold"), - }) - if err != nil { - return err - } - - return bindMainnetProcessor(frostfsProcessor, s) -} - -func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { - controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") - if controlSvcEndpoint == "" { - s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified) - return nil - } - - authKeysStr := cfg.GetStringSlice("control.authorized_keys") - 
authKeys := make([][]byte, 0, len(authKeysStr)) - - for i := range authKeysStr { - key, err := hex.DecodeString(authKeysStr[i]) - if err != nil { - return fmt.Errorf("could not parse Control authorized key %s: %w", - authKeysStr[i], - err, - ) - } - - authKeys = append(authKeys, key) - } - - var p controlsrv.Prm - - p.SetPrivateKey(*s.key) - p.SetHealthChecker(s) - - controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, - controlsrv.WithAllowedKeys(authKeys), - ), log.WithTag(logger.TagGrpcSvc), audit) - - grpcControlSrv := grpc.NewServer() - control.RegisterControlServiceServer(grpcControlSrv, controlSvc) - - s.runners = append(s.runners, func(ch chan<- error) error { - lis, err := net.Listen("tcp", controlSvcEndpoint) - if err != nil { - return err - } - - go func() { - ch <- grpcControlSrv.Serve(lis) - }() - return nil - }) - - s.registerNoErrCloser(grpcControlSrv.GracefulStop) - return nil -} - -type serverMorphClients struct { - CnrClient *container.Client - FrostFSIDClient *frostfsid.Client - FrostFSClient *frostfsClient.Client -} - -func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { - result := &serverMorphClients{} - var err error - - fee := s.feeConfig.SideChainFee() - - // form morph container client's options - morphCnrOpts := make([]container.Option, 0, 3) - morphCnrOpts = append(morphCnrOpts, - container.AsAlphabet(), - ) - - result.CnrClient, err = container.NewFromMorph(s.morphClient, s.contracts.container, fee, morphCnrOpts...) - if err != nil { - return nil, err - } - s.containerClient = result.CnrClient - - s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet()) - if err != nil { - return nil, err - } - - s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet()) - if err != nil { - return nil, err - } - - result.FrostFSIDClient, err = frostfsid.NewFromMorph(s.morphClient, s.contracts.frostfsID, fee) - if err != nil { - return nil, err - } - - result.FrostFSClient, err = frostfsClient.NewFromMorph(s.mainnetClient, s.contracts.frostfs, - s.feeConfig.MainChainFee(), frostfsClient.TryNotary(), frostfsClient.AsAlphabet()) - if err != nil { - return nil, err - } - - return result, nil -} - -func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error { - irf := s.createIRFetcher() - - s.statusIndex = newInnerRingIndexer( - s.morphClient, - irf, - s.key.PublicKey(), - cfg.GetDuration("indexer.cache_timeout"), - ) - - alphaSync, err := s.createAlphaSync(cfg, morphClients.FrostFSClient, irf) - if err != nil { - return err - } - - err = s.initNetmapProcessor(ctx, cfg, alphaSync) - if err != nil { - return err - } - - err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) - if err != nil { - return err - } - - err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient) - if err != nil { - return err - } - - err = s.initFrostFSMainnetProcessor(ctx, cfg) - if err != nil { - return err - } - - err = s.initAlphabetProcessor(ctx, cfg) - return err -} - -func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- error) (*chainParams, error) { - fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) - if err != nil { - fromSideChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) - } - - morphChain := &chainParams{ - log: 
s.log.WithTag(logger.TagMorph), - cfg: cfg, - key: s.key, - name: morphPrefix, - from: fromSideChainBlock, - morphCacheMetric: s.irMetrics.MorphCacheMetrics(), - multinetMetrics: s.irMetrics.Multinet(), - } - - // create morph client - s.morphClient, err = createClient(ctx, morphChain, errChan) - if err != nil { - return nil, err - } - - // create morph listener - s.morphListener, err = createListener(ctx, s.morphClient, morphChain) - if err != nil { - return nil, err - } - if err := s.morphClient.SetGroupSignerScope(); err != nil { - morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) - } - - return morphChain, nil -} - -func (s *Server) initContracts(cfg *viper.Viper) error { - var err error - // get all script hashes of contracts - s.contracts, err = parseContracts( - cfg, - s.morphClient, - s.withoutMainNet, - s.mainNotaryConfig.disabled, - ) - - return err -} - -func (s *Server) initKey(cfg *viper.Viper) error { - // prepare inner ring node private key - acc, err := utilConfig.LoadAccount( - cfg.GetString("wallet.path"), - cfg.GetString("wallet.address"), - cfg.GetString("wallet.password")) - if err != nil { - return fmt.Errorf("ir: %w", err) - } - - s.key = acc.PrivateKey() - return nil -} diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go deleted file mode 100644 index 3a5137261..000000000 --- a/pkg/innerring/innerring.go +++ /dev/null @@ -1,662 +0,0 @@ -package innerring - -import ( - "context" - "errors" - "fmt" - "io" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" - timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -type ( - // Server is the inner ring application structure, that contains all event - // processors, shared variables and event handlers. 
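	// Editor's sketch (illustration only): the starters, closers and runners
	// fields below are populated via the register* helpers, e.g. as locode.go
	// and initGRPCServer do; srvLoop is a hypothetical blocking serve loop:
	//
	//	s.registerStarter(db.Open) // runs before Start completes
	//	s.registerIOCloser(db)     // closed (and errors logged) on Stop
	//	s.runners = append(s.runners, func(ch chan<- error) error {
	//		go func() { ch <- srvLoop() }()
	//		return nil
	//	})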
- Server struct { - log *logger.Logger - - // event producers - morphListener event.Listener - mainnetListener event.Listener - blockTimers []*timer.BlockTimer - epochTimer *timer.BlockTimer - - // global state - morphClient *client.Client - mainnetClient *client.Client - epochCounter atomic.Uint64 - epochDuration atomic.Uint64 - statusIndex *innerRingIndexer - precision precision.Fixed8Converter - healthStatus atomic.Int32 - balanceClient *balanceClient.Client - netmapClient *nmClient.Client - persistate *state.PersistentStorage - containerClient *container.Client - - // metrics - irMetrics *metrics.InnerRingServiceMetrics - - // notary configuration - feeConfig *config.FeeConfig - mainNotaryConfig *notaryConfig - - // internal variables - key *keys.PrivateKey - contracts *contracts - predefinedValidators keys.PublicKeys - initialEpochTickDelta uint32 - withoutMainNet bool - sdNotify bool - - // runtime processors - netmapProcessor *netmap.Processor - alphabetProcessor *alphabet.Processor - - workers []func(context.Context) - - // Set of local resources that must be - // initialized at the very beginning of - // Server's work, (e.g. opening files). - // - // If any starter returns an error, Server's - // starting fails immediately. - starters []func() error - - // Set of local resources that must be - // released at Server's work completion - // (e.g closing files). - // - // Closer's wrong outcome shouldn't be critical. - // - // Errors are logged. - closers []func() error - - // Set of component runners which - // should report start errors - // to the application. - runners []func(chan<- error) error - - // cmode used for upgrade scenario. - // nolint:unused - cmode *atomic.Bool - } - - chainParams struct { - log *logger.Logger - cfg *viper.Viper - key *keys.PrivateKey - name string - sgn *transaction.Signer - from uint32 // block height - morphCacheMetric metrics.MorphCacheMetrics - multinetMetrics metrics.MultinetMetrics - } -) - -const ( - morphPrefix = "morph" - mainnetPrefix = "mainnet" - - // extra blocks to overlap two deposits, we do that to make sure that - // there won't be any blocks without deposited assets in notary contract; - // make sure it is bigger than any extra rounding value in notary client. - notaryExtraBlocks = 300 - // amount of tries before notary deposit timeout. - notaryDepositTimeout = 100 -) - -var ( - errDepositTimeout = errors.New("notary deposit didn't appear in the network") - errDepositFail = errors.New("notary tx has faulted") -) - -// Start runs all event providers. 
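// Editor's sketch (illustration only): a hypothetical caller drives the
// lifecycle through the error channel passed to Start:
//
//	intErr := make(chan error)
//	if err := srv.Start(ctx, intErr); err != nil {
//		return err
//	}
//	select {
//	case err := <-intErr: // fatal sidechain/mainnet error
//		srv.Stop(ctx)
//		return err
//	case <-ctx.Done():
//		srv.Stop(ctx)
//	}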
-func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
-	s.setHealthStatus(ctx, control.HealthStatus_STARTING)
-	defer func() {
-		if err == nil {
-			s.setHealthStatus(ctx, control.HealthStatus_READY)
-		}
-	}()
-
-	err = s.launchStarters()
-	if err != nil {
-		return err
-	}
-
-	err = s.initConfigFromBlockchain(ctx)
-	if err != nil {
-		return err
-	}
-
-	if s.IsAlphabet(ctx) {
-		err = s.initMainNotary(ctx)
-		if err != nil {
-			return err
-		}
-
-		err = s.initSideNotary(ctx)
-		if err != nil {
-			return err
-		}
-	}
-
-	prm := governance.VoteValidatorPrm{}
-	prm.Validators = s.predefinedValidators
-
-	// vote for sidechain validator if it is prepared in config
-	err = s.voteForSidechainValidator(ctx, prm)
-	if err != nil {
-		// we don't stop inner ring execution on this error
-		s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
-			zap.Error(err))
-	}
-
-	s.tickInitialEpoch(ctx)
-
-	morphErr := make(chan error)
-	mainnetErr := make(chan error)
-
-	// anonymous function to multiplex error channels
-	go func() {
-		select {
-		case <-ctx.Done():
-			return
-		case err := <-morphErr:
-			intError <- fmt.Errorf("sidechain: %w", err)
-		case err := <-mainnetErr:
-			intError <- fmt.Errorf("mainnet: %w", err)
-		}
-	}()
-
-	s.registerMorphNewBlockEventHandler()
-	s.registerMainnetNewBlockEventHandler()
-
-	if err := s.startRunners(intError); err != nil {
-		return err
-	}
-
-	go s.morphListener.ListenWithError(ctx, morphErr)     // listen for neo:morph events
-	go s.mainnetListener.ListenWithError(ctx, mainnetErr) // listen for neo:mainnet events
-
-	if err := s.startBlockTimers(); err != nil {
-		return fmt.Errorf("could not start block timers: %w", err)
-	}
-
-	s.startWorkers(ctx)
-
-	return nil
-}
-
-func (s *Server) registerMorphNewBlockEventHandler() {
-	s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
-		s.log.Debug(ctx, logs.InnerringNewBlock,
-			zap.Uint32("index", b.Index),
-		)
-
-		err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
-		if err != nil {
-			s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
-				zap.String("chain", "side"),
-				zap.Uint32("block_index", b.Index))
-		}
-
-		s.tickTimers(b.Index)
-	})
-}
-
-func (s *Server) registerMainnetNewBlockEventHandler() {
-	if !s.withoutMainNet {
-		s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
-			err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
-			if err != nil {
-				s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
-					zap.String("chain", "main"),
-					zap.Uint32("block_index", b.Index))
-			}
-		})
-	}
-}
-
-func (s *Server) startRunners(errCh chan<- error) error {
-	for _, runner := range s.runners {
-		if err := runner(errCh); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (s *Server) launchStarters() error {
-	for _, starter := range s.starters {
-		if err := starter(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (s *Server) initMainNotary(ctx context.Context) error {
-	if !s.mainNotaryConfig.disabled {
-		return s.initNotary(ctx,
-			s.depositMainNotary,
-			s.awaitMainNotaryDeposit,
-			"waiting to accept main notary deposit",
-		)
-	}
-	return nil
-}
-
-func (s *Server) initSideNotary(ctx context.Context) error {
-	return s.initNotary(ctx,
-		s.depositSideNotary,
-		s.awaitSideNotaryDeposit,
-		"waiting to accept side notary deposit",
-	)
-}
-
-func (s *Server) tickInitialEpoch(ctx context.Context) {
-	initialEpochTicker := timer.NewOneTickTimer(
-		timer.StaticBlockMeter(s.initialEpochTickDelta),
func() { - s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) - }) - s.addBlockTimer(initialEpochTicker) -} - -func (s *Server) startWorkers(ctx context.Context) { - for _, w := range s.workers { - go w(ctx) - } -} - -// Stop closes all subscription channels. -func (s *Server) Stop(ctx context.Context) { - s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) - - go s.morphListener.Stop() - go s.mainnetListener.Stop() - - for _, c := range s.closers { - if err := c(); err != nil { - s.log.Warn(ctx, logs.InnerringCloserError, - zap.Error(err), - ) - } - } -} - -func (s *Server) registerNoErrCloser(c func()) { - s.registerCloser(func() error { - c() - return nil - }) -} - -func (s *Server) registerIOCloser(c io.Closer) { - s.registerCloser(c.Close) -} - -func (s *Server) registerCloser(f func() error) { - s.closers = append(s.closers, f) -} - -func (s *Server) registerStarter(f func() error) { - s.starters = append(s.starters, f) -} - -// New creates instance of inner ring sever structure. -func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error, - metrics *metrics.InnerRingServiceMetrics, cmode *atomic.Bool, audit *atomic.Bool, -) (*Server, error) { - var err error - server := &Server{ - log: log.WithTag(logger.TagIr), - irMetrics: metrics, - cmode: cmode, - } - - server.sdNotify, err = server.initSdNotify(cfg) - if err != nil { - return nil, err - } - - server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED) - - // parse notary support - server.feeConfig = config.NewFeeConfig(cfg) - - err = server.initKey(cfg) - if err != nil { - return nil, err - } - - server.persistate, err = initPersistentStateStorage(cfg) - if err != nil { - return nil, err - } - server.registerCloser(server.persistate.Close) - - var morphChain *chainParams - morphChain, err = server.initMorph(ctx, cfg, errChan) - if err != nil { - return nil, err - } - - err = server.initMainnet(ctx, cfg, morphChain, errChan) - if err != nil { - return nil, err - } - - server.initNotaryConfig(ctx) - - err = server.initContracts(cfg) - if err != nil { - return nil, err - } - - err = server.enableNotarySupport() - if err != nil { - return nil, err - } - - // parse default validators - server.predefinedValidators, err = parsePredefinedValidators(cfg) - if err != nil { - return nil, fmt.Errorf("ir: can't parse predefined validators list: %w", err) - } - - var morphClients *serverMorphClients - morphClients, err = server.initClientsFromMorph() - if err != nil { - return nil, err - } - - err = server.initProcessors(ctx, cfg, morphClients) - if err != nil { - return nil, err - } - - server.initTimers(ctx, cfg) - - err = server.initGRPCServer(ctx, cfg, log, audit) - if err != nil { - return nil, err - } - - return server, nil -} - -func (s *Server) initSdNotify(cfg *viper.Viper) (bool, error) { - if cfg.GetBool("systemdnotify.enabled") { - return true, sdnotify.InitSocket() - } - return false, nil -} - -func createListener(ctx context.Context, cli *client.Client, p *chainParams) (event.Listener, error) { - var ( - sub subscriber.Subscriber - err error - ) - - sub, err = subscriber.New(ctx, &subscriber.Params{ - Log: p.log, - StartFromBlock: p.from, - Client: cli, - }) - if err != nil { - return nil, err - } - - listener, err := event.NewListener(event.ListenerParams{ - Logger: p.log.With(zap.String("chain", p.name)), - Subscriber: sub, - }) - if err != nil { - return nil, err - } - - return listener, err -} - -func createClient(ctx context.Context, p *chainParams, 
errChan chan<- error) (*client.Client, error) {
-	// config name left unchanged for compatibility; maybe it's better to rename it to "endpoints" or "clients"
-	var endpoints []client.Endpoint
-
-	// defaultPriority is the default endpoint priority
-	const defaultPriority = 1
-
-	section := p.name + ".endpoint.client"
-	for i := 0; ; i++ {
-		addr := p.cfg.GetString(fmt.Sprintf("%s.%d.%s", section, i, "address"))
-		if addr == "" {
-			break
-		}
-
-		priority := p.cfg.GetInt(fmt.Sprintf("%s.%d.priority", section, i))
-		if priority <= 0 {
-			priority = defaultPriority
-		}
-
-		var mtlsConfig *client.MTLSConfig
-		rootCAs := p.cfg.GetStringSlice(fmt.Sprintf("%s.%d.trusted_ca_list", section, i))
-		if len(rootCAs) != 0 {
-			mtlsConfig = &client.MTLSConfig{
-				TrustedCAList: rootCAs,
-				KeyFile:       p.cfg.GetString(fmt.Sprintf("%s.%d.key", section, i)),
-				CertFile:      p.cfg.GetString(fmt.Sprintf("%s.%d.certificate", section, i)),
-			}
-		}
-
-		endpoints = append(endpoints, client.Endpoint{
-			Address:    addr,
-			Priority:   priority,
-			MTLSConfig: mtlsConfig,
-		})
-	}
-
-	if len(endpoints) == 0 {
-		return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
-	}
-
-	nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
-	ds, err := internalNet.NewDialerSource(nc)
-	if err != nil {
-		return nil, fmt.Errorf("dialer source: %w", err)
-	}
-
-	return client.New(
-		ctx,
-		p.key,
-		client.WithLogger(p.log),
-		client.WithDialTimeout(p.cfg.GetDuration(p.name+".dial_timeout")),
-		client.WithSigner(p.sgn),
-		client.WithEndpoints(endpoints...),
-		client.WithConnLostCallback(func() {
-			errChan <- fmt.Errorf("%s chain connection has been lost", p.name)
-		}),
-		client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
-		client.WithMorphCacheMetrics(p.morphCacheMetric),
-		client.WithDialerSource(ds),
-	)
-}
-
-func parsePredefinedValidators(cfg *viper.Viper) (keys.PublicKeys, error) {
-	publicKeyStrings := cfg.GetStringSlice("morph.validators")
-
-	return ParsePublicKeysFromStrings(publicKeyStrings)
-}
-
-// ParsePublicKeysFromStrings returns a slice of neo public keys from a slice
-// of hex-encoded strings.
-func ParsePublicKeysFromStrings(pubKeys []string) (keys.PublicKeys, error) {
-	publicKeys := make(keys.PublicKeys, 0, len(pubKeys))
-
-	for i := range pubKeys {
-		key, err := keys.NewPublicKeyFromString(pubKeys[i])
-		if err != nil {
-			return nil, fmt.Errorf("can't decode public key: %w", err)
-		}
-
-		publicKeys = append(publicKeys, key)
-	}
-
-	return publicKeys, nil
-}
-
-// parseWalletAddressesFromStrings returns a slice of util.Uint160 from a slice
-// of strings.
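// Editor's example (illustration only): address.StringToUint160 is the inverse
// of address.Uint160ToString from the same neo-go package, so a script hash
// round-trips through its base58 NEO address form:
//
//	addr := address.Uint160ToString(util.Uint160{1, 2, 3})
//	h, err := address.StringToUint160(addr) // h == util.Uint160{1, 2, 3}, err == nil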
-func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) { - if len(wallets) == 0 { - return nil, nil - } - - var err error - extraWallets := make([]util.Uint160, len(wallets)) - for i := range wallets { - extraWallets[i], err = address.StringToUint160(wallets[i]) - if err != nil { - return nil, err - } - } - return extraWallets, nil -} - -func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config { - nc := internalNet.Config{ - Enabled: cfg.GetBool("multinet.enabled"), - Balancer: cfg.GetString("multinet.balancer"), - Restrict: cfg.GetBool("multinet.restrict"), - FallbackDelay: cfg.GetDuration("multinet.fallback_delay"), - Metrics: m, - } - for i := 0; ; i++ { - mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i)) - if mask == "" { - break - } - sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i)) - nc.Subnets = append(nc.Subnets, internalNet.Subnet{ - Prefix: mask, - SourceIPs: sourceIPs, - }) - } - return nc -} - -func (s *Server) initConfigFromBlockchain(ctx context.Context) error { - // get current epoch - epoch, err := s.netmapClient.Epoch(ctx) - if err != nil { - return fmt.Errorf("can't read epoch number: %w", err) - } - - // get current epoch duration - epochDuration, err := s.netmapClient.EpochDuration(ctx) - if err != nil { - return fmt.Errorf("can't read epoch duration: %w", err) - } - - // get balance precision - balancePrecision, err := s.balanceClient.Decimals(ctx) - if err != nil { - return fmt.Errorf("can't read balance contract precision: %w", err) - } - - s.epochCounter.Store(epoch) - s.epochDuration.Store(epochDuration) - s.precision.SetBalancePrecision(balancePrecision) - - // get next epoch delta tick - s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) - if err != nil { - return err - } - - s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain, - zap.Bool("active", s.IsActive(ctx)), - zap.Bool("alphabet", s.IsAlphabet(ctx)), - zap.Uint64("epoch", epoch), - zap.Uint32("precision", balancePrecision), - zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta), - ) - - return nil -} - -func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { - epochBlock, err := s.netmapClient.LastEpochBlock(ctx) - if err != nil { - return 0, fmt.Errorf("can't read last epoch block: %w", err) - } - - blockHeight, err := s.morphClient.BlockCount() - if err != nil { - return 0, fmt.Errorf("can't get side chain height: %w", err) - } - - delta := uint32(s.epochDuration.Load()) + epochBlock - if delta < blockHeight { - return 0, nil - } - - return delta - blockHeight, nil -} - -// onlyAlphabet wrapper around event handler that executes it -// only if inner ring node is alphabet node. 
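// Editor's sketch (illustration only): any event handler can be wrapped this
// way, as initNetmapProcessor does for the alphabet-sync and notary handlers:
//
//	guarded := s.onlyAlphabetEventHandler(func(ctx context.Context, _ event.Event) {
//		// body runs only while this node holds an alphabet seat
//	})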
-func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { - return func(ctx context.Context, ev event.Event) { - if s.IsAlphabet(ctx) { - f(ctx, ev) - } - } -} - -func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler { - newEpochHandlers := []newEpochHandler{ - func() { - s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) - }, - } - - return newEpochHandlers -} - -func (s *Server) SetExtraWallets(cfg *viper.Viper) error { - parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) - if err != nil { - return err - } - s.alphabetProcessor.SetParsedWallets(parsedWallets) - return nil -} diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go deleted file mode 100644 index ae4c85168..000000000 --- a/pkg/innerring/locode.go +++ /dev/null @@ -1,53 +0,0 @@ -package innerring - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" - irlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode" - "github.com/spf13/viper" -) - -func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { - locodeDB := locodebolt.New(locodebolt.Prm{ - Path: cfg.GetString("locode.db.path"), - }, - locodebolt.ReadOnly(), - ) - - s.registerStarter(locodeDB.Open) - s.registerIOCloser(locodeDB) - - return irlocode.New(irlocode.Prm{ - DB: (*locodeBoltDBWrapper)(locodeDB), - }) -} - -type locodeBoltEntryWrapper struct { - *locodedb.Key - *locodedb.Record -} - -func (l *locodeBoltEntryWrapper) LocationName() string { - return l.Record.LocationName() -} - -type locodeBoltDBWrapper locodebolt.DB - -func (l *locodeBoltDBWrapper) Get(lc *locode.LOCODE) (irlocode.Record, error) { - key, err := locodedb.NewKey(*lc) - if err != nil { - return nil, err - } - - rec, err := (*locodebolt.DB)(l).Get(*key) - if err != nil { - return nil, err - } - - return &locodeBoltEntryWrapper{ - Key: key, - Record: rec, - }, nil -} diff --git a/pkg/innerring/metrics/metrics.go b/pkg/innerring/metrics/metrics.go deleted file mode 100644 index 002f3afe1..000000000 --- a/pkg/innerring/metrics/metrics.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import "time" - -type Register interface { - SetEpoch(epoch uint64) - SetHealth(s int32) - AddEvent(d time.Duration, typ string, success bool) -} - -type DefaultRegister struct{} - -func (DefaultRegister) SetEpoch(uint64) {} -func (DefaultRegister) SetHealth(int32) {} -func (DefaultRegister) AddEvent(time.Duration, string, bool) {} diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go deleted file mode 100644 index fb11e9426..000000000 --- a/pkg/innerring/netmap.go +++ /dev/null @@ -1,30 +0,0 @@ -package innerring - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" -) - -/* -File contains dependencies for processor of the Netmap contract's notifications. -*/ - -// wraps Netmap contract's client and provides state.NetworkSettings. 
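// Editor's note (added commentary): networkSettings is a defined type over
// netmapclient.Client, so a plain pointer conversion attaches the
// state.NetworkSettings implementation without a wrapper struct, as done in
// initNetmapProcessor:
//
//	netSettings := (*networkSettings)(s.netmapClient)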
-type networkSettings netmapclient.Client
-
-// MaintenanceModeAllowed requests the network configuration from the Sidechain
-// and checks whether the storage node's maintenance mode is allowed by it.
-// Returns state.ErrMaintenanceModeDisallowed if it is not allowed.
-func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
-	allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
-	if err != nil {
-		return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
-	} else if allowed {
-		return nil
-	}
-
-	return state.ErrMaintenanceModeDisallowed
-}
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
deleted file mode 100644
index c8a69962f..000000000
--- a/pkg/innerring/notary.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package innerring
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"go.uber.org/zap"
-)
-
-type (
-	notaryConfig struct {
-		disabled bool // true if notary is disabled on the chain
-	}
-)
-
-const (
-	// gasMultiplier defines how many times larger the notary
-	// balance must be than the GAS balance of the IR:
-	// notaryBalance = GASBalance * gasMultiplier.
-	gasMultiplier = 3
-
-	// gasDivisor defines what part of GAS balance (1/gasDivisor)
-	// should be transferred to the notary service.
-	gasDivisor = 2
-)
-
-func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
-	depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
-	if err != nil {
-		return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
-	}
-
-	return s.mainnetClient.DepositNotary(
-		ctx,
-		depositAmount,
-		uint32(s.epochDuration.Load())+notaryExtraBlocks,
-	)
-}
-
-func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
-	depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
-	if err != nil {
-		return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
-	}
-
-	tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
-	return tx, err
-}
-
-func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
-	if !s.mainNotaryConfig.disabled {
-		_, err := s.depositMainNotary(ctx)
-		if err != nil {
-			s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
-		}
-	}
-
-	if _, err := s.depositSideNotary(ctx); err != nil {
-		s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
-	}
-}
-
-func (s *Server) awaitMainNotaryDeposit(ctx context.Context, tx util.Uint256) error {
-	return awaitNotaryDepositInClient(ctx, s.mainnetClient, tx)
-}
-
-func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) error {
-	return awaitNotaryDepositInClient(ctx, s.morphClient, tx)
-}
-
-func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
-	tx, err := deposit(ctx)
-	if err != nil {
-		return err
-	}
-
-	if tx.Equals(util.Uint256{}) {
-		// non-error deposit with an empty TX hash means
-		// that the deposit has already been made; no
-		// need to wait for it.
-		s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
-		return nil
-	}
-
-	s.log.Info(ctx, msg)
-
-	return await(ctx, tx)
-}
-
-func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
-	for range notaryDepositTimeout {
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-		}
-
-		ok, err := cli.TxHalt(txHash)
-		if err == nil {
-			if ok {
-				return nil
-			}
-
-			return errDepositFail
-		}
-
-		_ = cli.Wait(ctx, 1)
-	}
-
-	return errDepositTimeout
-}
-
-func notaryConfigs(withMainNotary bool) (main *notaryConfig) {
-	main = new(notaryConfig)
-
-	main.disabled = !withMainNotary
-
-	return
-}
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
deleted file mode 100644
index d6b653282..000000000
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package alphabet
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	"go.uber.org/zap"
-)
-
-func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
-	_ = ev.(timers.NewAlphabetEmitTick)
-	ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
-
-	// send event to the worker pool
-
-	err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
-	if err != nil {
-		// here the system can be moved into a controlled degradation stage
-		ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
-			zap.Int("capacity", ap.pool.Cap()))
-	}
-}
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
deleted file mode 100644
index 1da3c401d..000000000
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package alphabet_test
-
-import (
-	"context"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-)
-
-func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
-	t.Parallel()
-	var emission uint64 = 100_000
-	var index int = 5
-	var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
-
-	alphabetContracts := innerring.NewAlphabetContracts()
-	for i := range index + 1 {
-		alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
-	}
-
-	morphClient := &testMorphClient{}
-
-	var node1 netmap.NodeInfo
-	key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
-	require.NoError(t, err, "failed to parse key1")
-	node1.SetPublicKey(key1.Bytes())
-
-	var node2 netmap.NodeInfo
-	key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3")
-	require.NoError(t, err, "failed to parse key2")
-	node2.SetPublicKey(key2.Bytes())
-
-	nodes :=
[]netmap.NodeInfo{node1, node2} - - network := &netmap.NetMap{} - network.SetNodes(nodes) - - netmapClient := &testNetmapClient{ - netmap: network, - } - - params := &alphabet.Params{ - ParsedWallets: parsedWallets, - Log: test.NewLogger(t), - PoolSize: 2, - StorageEmission: emission, - IRList: &testIndexer{index: index}, - AlphabetContracts: alphabetContracts, - MorphClient: morphClient, - NetmapClient: netmapClient, - } - - processor, err := alphabet.New(params) - require.NoError(t, err, "failed to create processor instance") - - processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) - - processor.WaitPoolRunning() - - require.EqualValues(t, []invokedMethod{ - { - contract: alphabetContracts[innerring.GlagoliticLetter(index)], - fee: 0, - method: "emit", - }, - }, morphClient.invokedMethods, "invalid invoked morph methods") - - require.EqualValues(t, []transferGas{ - { - receiver: key1.GetScriptHash(), - amount: fixedn.Fixed8(25_000), - }, - { - receiver: key2.GetScriptHash(), - amount: fixedn.Fixed8(25_000), - }, - }, morphClient.transferedGas, "invalid transfered Gas") - - require.EqualValues(t, []batchTransferGas{ - { - receivers: parsedWallets, - amount: fixedn.Fixed8(25_000), - }, - }, morphClient.batchTransferedGas, "invalid batch transfered Gas") -} - -func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { - t.Parallel() - var emission uint64 = 100_000 - var index int = 5 - var parsedWallets []util.Uint160 - - alphabetContracts := innerring.NewAlphabetContracts() - for i := range index + 1 { - alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} - } - - morphClient := &testMorphClient{} - - var node1 netmap.NodeInfo - key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") - require.NoError(t, err, "failed to parse key1") - node1.SetPublicKey(key1.Bytes()) - - var node2 netmap.NodeInfo - key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3") - require.NoError(t, err, "failed to parse key2") - node2.SetPublicKey(key2.Bytes()) - - nodes := []netmap.NodeInfo{node1, node2} - - network := &netmap.NetMap{} - network.SetNodes(nodes) - - netmapClient := &testNetmapClient{ - netmap: network, - } - - params := &alphabet.Params{ - ParsedWallets: parsedWallets, - Log: test.NewLogger(t), - PoolSize: 2, - StorageEmission: emission, - IRList: &testIndexer{index: index}, - AlphabetContracts: alphabetContracts, - MorphClient: morphClient, - NetmapClient: netmapClient, - } - - processor, err := alphabet.New(params) - require.NoError(t, err, "failed to create processor instance") - - processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) - - processor.WaitPoolRunning() - - require.EqualValues(t, []invokedMethod{ - { - contract: alphabetContracts[innerring.GlagoliticLetter(index)], - fee: 0, - method: "emit", - }, - }, morphClient.invokedMethods, "invalid invoked morph methods") - - require.EqualValues(t, []transferGas{ - { - receiver: key1.GetScriptHash(), - amount: fixedn.Fixed8(50_000), - }, - { - receiver: key2.GetScriptHash(), - amount: fixedn.Fixed8(50_000), - }, - }, morphClient.transferedGas, "invalid transfered Gas") - - require.Equal(t, 0, len(morphClient.batchTransferedGas), "invalid batch transfered Gas") -} - -func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { - t.Parallel() - var emission uint64 = 100_000 - var index int = 5 - var parsedWallets []util.Uint160 - - 
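	// Editor's note (added commentary): this test keeps both the netmap and
	// the extra-wallet list empty, so the assertions below expect only the
	// "emit" contract invocation, with no gas transferred to anyone.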
alphabetContracts := innerring.NewAlphabetContracts() - for i := range index + 1 { - alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} - } - - morphClient := &testMorphClient{} - - var nodes []netmap.NodeInfo - network := &netmap.NetMap{} - network.SetNodes(nodes) - - netmapClient := &testNetmapClient{ - netmap: network, - } - - params := &alphabet.Params{ - ParsedWallets: parsedWallets, - Log: test.NewLogger(t), - PoolSize: 2, - StorageEmission: emission, - IRList: &testIndexer{index: index}, - AlphabetContracts: alphabetContracts, - MorphClient: morphClient, - NetmapClient: netmapClient, - } - - processor, err := alphabet.New(params) - require.NoError(t, err, "failed to create processor instance") - - processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) - - processor.WaitPoolRunning() - - require.EqualValues(t, []invokedMethod{ - { - contract: alphabetContracts[innerring.GlagoliticLetter(index)], - fee: 0, - method: "emit", - }, - }, morphClient.invokedMethods, "invalid invoked morph methods") - - require.Equal(t, 0, len(morphClient.transferedGas), "invalid transfered Gas") - - require.Equal(t, 0, len(morphClient.batchTransferedGas), "invalid batch transfered Gas") -} - -type testIndexer struct { - index int -} - -func (i *testIndexer) AlphabetIndex(context.Context) int { - return i.index -} - -type invokedMethod struct { - contract util.Uint160 - fee fixedn.Fixed8 - method string - args []any -} - -type transferGas struct { - receiver util.Uint160 - amount fixedn.Fixed8 -} - -type batchTransferGas struct { - receivers []util.Uint160 - amount fixedn.Fixed8 -} - -type testMorphClient struct { - invokedMethods []invokedMethod - transferedGas []transferGas - batchTransferedGas []batchTransferGas -} - -func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) { - c.invokedMethods = append(c.invokedMethods, - invokedMethod{ - contract: contract, - fee: fee, - method: method, - args: args, - }) - return client.InvokeRes{}, nil -} - -func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error { - c.transferedGas = append(c.transferedGas, transferGas{ - receiver: receiver, - amount: amount, - }) - return nil -} - -func (c *testMorphClient) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error { - c.batchTransferedGas = append(c.batchTransferedGas, batchTransferGas{ - receivers: receivers, - amount: amount, - }) - return nil -} - -type testNetmapClient struct { - netmap *netmap.NetMap -} - -func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { - return c.netmap, nil -} diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go deleted file mode 100644 index d3d0f83f2..000000000 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ /dev/null @@ -1,117 +0,0 @@ -package alphabet - -import ( - "context" - "crypto/elliptic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -const emitMethod = "emit" - -func (ap *Processor) processEmit(ctx context.Context) bool { - index := ap.irList.AlphabetIndex(ctx) - if index < 0 { - ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) - 
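The 25_000 vs 50_000 expectations in the tests above come straight from the integer division in processEmit (next file): the emission is split evenly across netmap nodes plus extra wallets. A small sketch of that arithmetic, assuming only neo-go's public fixedn package:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
)

func main() {
	var storageEmission uint64 = 100_000

	// two netmap nodes plus two parsed wallets -> four receivers
	gasPerNode := fixedn.Fixed8(storageEmission / uint64(2+2))
	fmt.Println(int64(gasPerNode)) // 25000, matching the first test

	// with no extra wallets the divisor shrinks and each node gets more
	gasPerNode = fixedn.Fixed8(storageEmission / uint64(2))
	fmt.Println(int64(gasPerNode)) // 50000, matching the second test
}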
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
deleted file mode 100644
index d3d0f83f2..000000000
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package alphabet
-
-import (
-	"context"
-	"crypto/elliptic"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"go.uber.org/zap"
-)
-
-const emitMethod = "emit"
-
-func (ap *Processor) processEmit(ctx context.Context) bool {
-	index := ap.irList.AlphabetIndex(ctx)
-	if index < 0 {
-		ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
-
-		return true
-	}
-
-	contract, ok := ap.alphabetContracts.GetByIndex(index)
-	if !ok {
-		ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
-			zap.Int("index", index))
-
-		return false
-	}
-
-	// there is no signature collection, so we don't need an extra fee
-	_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
-	if err != nil {
-		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
-
-		return false
-	}
-
-	if ap.storageEmission == 0 {
-		ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
-
-		return true
-	}
-
-	networkMap, err := ap.netmapClient.NetMap(ctx)
-	if err != nil {
-		ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
-			zap.Error(err))
-
-		return false
-	}
-
-	nmNodes := networkMap.Nodes()
-	nmLen := len(nmNodes)
-	ap.pwLock.RLock()
-	pw := ap.parsedWallets
-	ap.pwLock.RUnlock()
-	extraLen := len(pw)
-
-	ap.log.Debug(ctx, logs.AlphabetGasEmission,
-		zap.Int("network_map", nmLen),
-		zap.Int("extra_wallets", extraLen))
-
-	if nmLen+extraLen == 0 {
-		return true
-	}
-
-	gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
-
-	ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
-
-	ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
-
-	return true
-}
-
-func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
-	for i := range nmNodes {
-		keyBytes := nmNodes[i].PublicKey()
-
-		key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
-		if err != nil {
-			ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
				zap.Error(err))
-
-			continue
-		}
-
-		err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
-		if err != nil {
-			ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
-				zap.String("receiver", key.Address()),
-				zap.Int64("amount", int64(gasPerNode)),
-				zap.Error(err),
-			)
-		}
-	}
-}
-
-func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
-	if len(pw) > 0 {
-		err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
-		if err != nil {
-			receiversLog := make([]string, len(pw))
-			for i, addr := range pw {
-				receiversLog[i] = addr.StringLE()
-			}
-			ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
-				zap.Strings("receivers", receiversLog),
-				zap.Int64("amount", int64(gasPerNode)),
-				zap.Error(err),
-			)
-		}
-	}
-}
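transferGasToNetmapNodes turns each node's raw public key into a GAS receiver via keys.NewPublicKeyFromBytes(..., elliptic.P256()). A minimal sketch of that conversion, reusing a compressed key from the tests above (assuming only neo-go's public keys package):

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	// same compressed secp256r1 key used by the handlers_test.go file above
	key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35")
	if err != nil {
		panic(err)
	}

	fmt.Println(key.GetScriptHash()) // Uint160 receiver passed to TransferGas
	fmt.Println(key.Address())       // human-readable form used in the warn log
}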
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
deleted file mode 100644
index 0aea74003..000000000
--- a/pkg/innerring/processors/alphabet/processor.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package alphabet
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/panjf2000/ants/v2"
-)
-
-type (
-	// Indexer is a callback interface for inner ring global state.
-	Indexer interface {
-		AlphabetIndex(context.Context) int
-	}
-
-	// Contracts is an interface of the storage
-	// of the alphabet contract addresses.
-	Contracts interface {
-		// GetByIndex must return the address of the
-		// alphabet contract by index of the glagolitic
-		// letter (e.g. 0 for Az, 40 for Izhitsa).
-		//
-		// Must return false if the index does not
-		// match any alphabet contract.
-		GetByIndex(int) (util.Uint160, bool)
-	}
-
-	netmapClient interface {
-		NetMap(ctx context.Context) (*netmap.NetMap, error)
-	}
-
-	morphClient interface {
-		Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error)
-		TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
-		BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
-	}
-
-	// Processor of events produced for alphabet contracts in the sidechain.
-	Processor struct {
-		parsedWallets []util.Uint160
-		// protects parsedWallets from concurrent change
-		pwLock            sync.RWMutex
-		log               *logger.Logger
-		metrics           metrics.Register
-		pool              *ants.Pool
-		alphabetContracts Contracts
-		netmapClient      netmapClient
-		morphClient       morphClient
-		irList            Indexer
-		storageEmission   uint64
-	}
-
-	// Params of the processor constructor.
-	Params struct {
-		ParsedWallets     []util.Uint160
-		Log               *logger.Logger
-		Metrics           metrics.Register
-		PoolSize          int
-		AlphabetContracts Contracts
-		NetmapClient      netmapClient
-		MorphClient       morphClient
-		IRList            Indexer
-		StorageEmission   uint64
-	}
-)
-
-// New creates an alphabet contract processor instance.
-func New(p *Params) (*Processor, error) {
-	switch {
-	case p.Log == nil:
-		return nil, errors.New("ir/alphabet: logger is not set")
-	case p.MorphClient == nil:
-		return nil, errors.New("ir/alphabet: neo:morph client is not set")
-	case p.IRList == nil:
-		return nil, errors.New("ir/alphabet: global state is not set")
-	}
-
-	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
-	if err != nil {
-		return nil, fmt.Errorf("ir/alphabet: can't create worker pool: %w", err)
-	}
-
-	metricsRegister := p.Metrics
-	if metricsRegister == nil {
-		metricsRegister = metrics.DefaultRegister{}
-	}
-
-	return &Processor{
-		parsedWallets:     p.ParsedWallets,
-		log:               p.Log,
-		metrics:           metricsRegister,
-		pool:              pool,
-		alphabetContracts: p.AlphabetContracts,
-		netmapClient:      p.NetmapClient,
-		morphClient:       p.MorphClient,
-		irList:            p.IRList,
-		storageEmission:   p.StorageEmission,
-	}, nil
-}
-
-func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
-	ap.pwLock.Lock()
-	ap.parsedWallets = parsedWallets
-	ap.pwLock.Unlock()
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
-	return nil
-}
-
-// ListenerNotaryParsers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
-	return nil
-}
-
-// ListenerNotaryHandlers for the 'event.Listener' event producer.
-func (ap *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
-	return nil
-}
-
-// WaitPoolRunning waits until the pool has no running tasks.
-// For use in tests only.
-func (ap *Processor) WaitPoolRunning() {
-	for ap.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-}
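The Contracts interface above only requires GetByIndex. A hypothetical map-backed implementation (the repo's real one is built by innerring.NewAlphabetContracts(), seen in the tests; this sketch only assumes neo-go's util package):

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/util"
)

// contractMap maps a glagolitic letter index to an alphabet contract address.
type contractMap map[int]util.Uint160

// GetByIndex satisfies the Contracts contract: false for any unknown index.
func (m contractMap) GetByIndex(i int) (util.Uint160, bool) {
	if i < 0 {
		return util.Uint160{}, false
	}
	sh, ok := m[i]
	return sh, ok
}

func main() {
	m := contractMap{0: {1}, 1: {2}} // Az, Buky

	if sh, ok := m.GetByIndex(0); ok {
		fmt.Println(sh)
	}

	_, ok := m.GetByIndex(40) // Izhitsa is not deployed in this sketch
	fmt.Println(ok)           // false
}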
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
deleted file mode 100644
index b5d05a02e..000000000
--- a/pkg/innerring/processors/balance/handlers.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package balance
-
-import (
-	"context"
-	"encoding/hex"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
-	"go.uber.org/zap"
-)
-
-func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
-	lock := ev.(balanceEvent.Lock)
-	bp.log.Info(ctx, logs.Notification,
-		zap.String("type", "lock"),
-		zap.String("value", hex.EncodeToString(lock.ID())))
-
-	// send an event to the worker pool
-
-	err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
-		return bp.processLock(ctx, &lock)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
-			zap.Int("capacity", bp.pool.Cap()))
-	}
-}
diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go
deleted file mode 100644
index 0fd23d8ab..000000000
--- a/pkg/innerring/processors/balance/handlers_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package balance
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
-	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-)
-
-func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
-	t.Parallel()
-	as := &testAlphabetState{
-		isAlphabet: true,
-	}
-	conv := &testPresicionConverter{}
-	cl := &testFrostFSContractClient{}
-	bsc := util.Uint160{100}
-
-	processor, err := New(&Params{
-		Log:           test.NewLogger(t),
-		PoolSize:      2,
-		FrostFSClient: cl,
-		BalanceSC:     bsc,
-		AlphabetState: as,
-		Converter:     conv,
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	processor.handleLock(context.Background(), balanceEvent.Lock{})
-
-	for processor.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	require.Equal(t, 1, cl.chequeCalls, "invalid Cheque calls")
-}
-
-func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
-	t.Parallel()
-	as := &testAlphabetState{}
-	conv := &testPresicionConverter{}
-	cl := &testFrostFSContractClient{}
-	bsc := util.Uint160{100}
-
-	processor, err := New(&Params{
-		Log:           test.NewLogger(t),
-		PoolSize:      2,
-		FrostFSClient: cl,
-		BalanceSC:     bsc,
-		AlphabetState: as,
-		Converter:     conv,
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	processor.handleLock(context.Background(), balanceEvent.Lock{})
-
-	for processor.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	require.Equal(t, 0, cl.chequeCalls, "invalid Cheque calls")
-}
-
-type testAlphabetState struct {
-	isAlphabet bool
-}
-
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
-	return s.isAlphabet
-}
-
-type testPresicionConverter struct{}
-
-func (c *testPresicionConverter) ToFixed8(v int64) int64 {
-	return v
-}
-
-type testFrostFSContractClient struct {
-	chequeCalls int
-}
-
-func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
-	c.chequeCalls++
-	return nil
-}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
deleted file mode 100644
index 60475908c..000000000
--- a/pkg/innerring/processors/balance/process_assets.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package balance
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
-	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
-	"go.uber.org/zap"
-)
-
-// Process a lock event by invoking the Cheque method in the mainnet to send
-// assets back to the withdraw issuer.
-func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
-	if !bp.alphabetState.IsAlphabet(ctx) {
-		bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
-		return true
-	}
-
-	prm := frostfsContract.ChequePrm{}
-
-	prm.SetID(lock.ID())
-	prm.SetUser(lock.User())
-	prm.SetAmount(bp.converter.ToFixed8(lock.Amount()))
-	prm.SetLock(lock.LockAccount())
-	prm.SetHash(lock.TxHash())
-
-	err := bp.frostfsClient.Cheque(ctx, prm)
-	if err != nil {
-		bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
-		return false
-	}
-
-	return true
-}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
deleted file mode 100644
index 34203b74f..000000000
--- a/pkg/innerring/processors/balance/processor.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package balance
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
-	frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/panjf2000/ants/v2"
-)
-
-type (
-	// AlphabetState is a callback interface for inner ring global state.
-	AlphabetState interface {
-		IsAlphabet(context.Context) bool
-	}
-
-	// PrecisionConverter converts balance amount values.
-	PrecisionConverter interface {
-		ToFixed8(int64) int64
-	}
-
-	FrostFSClient interface {
-		Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
-	}
-
-	// Processor of events produced by balance contract in the morphchain.
-	Processor struct {
-		log           *logger.Logger
-		metrics       metrics.Register
-		pool          *ants.Pool
-		frostfsClient FrostFSClient
-		balanceSC     util.Uint160
-		alphabetState AlphabetState
-		converter     PrecisionConverter
-	}
-
-	// Params of the processor constructor.
-	Params struct {
-		Log           *logger.Logger
-		Metrics       metrics.Register
-		PoolSize      int
-		FrostFSClient FrostFSClient
-		BalanceSC     util.Uint160
-		AlphabetState AlphabetState
-		Converter     PrecisionConverter
-	}
-)
-
-const (
-	lockNotification = "Lock"
-)
-
-// New creates a balance contract processor instance.
-func New(p *Params) (*Processor, error) {
-	switch {
-	case p.Log == nil:
-		return nil, errors.New("ir/balance: logger is not set")
-	case p.AlphabetState == nil:
-		return nil, errors.New("ir/balance: global state is not set")
-	case p.Converter == nil:
-		return nil, errors.New("ir/balance: balance precision converter is not set")
-	}
-
-	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
-	if err != nil {
-		return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
-	}
-
-	metricsRegister := p.Metrics
-	if metricsRegister == nil {
-		metricsRegister = metrics.DefaultRegister{}
-	}
-
-	return &Processor{
-		log:           p.Log,
-		metrics:       metricsRegister,
-		pool:          pool,
-		frostfsClient: p.FrostFSClient,
-		balanceSC:     p.BalanceSC,
-		alphabetState: p.AlphabetState,
-		converter:     p.Converter,
-	}, nil
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
-	return []event.NotificationHandlerInfo{
-		{
-			Contract: bp.balanceSC,
-			Type:     lockNotification,
-			Parser:   balanceEvent.ParseLock,
-			Handlers: []event.Handler{bp.handleLock},
-		},
-	}
-}
-
-// ListenerNotaryParsers for the 'event.Listener' event producer.
-func (bp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
-	return nil
-}
-
-// ListenerNotaryHandlers for the 'event.Listener' event producer.
-func (bp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
-	return nil
-}
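The PrecisionConverter above rescales mainnet token amounts to the Fixed8 precision the Cheque call expects. A hypothetical pure-math converter in that spirit (the repo's real converter lives elsewhere; everything here is illustrative):

package main

import "fmt"

// rescale moves a value from `from` decimal places to `to` decimal
// places; ToFixed8 corresponds to to == 8. Integer division truncates,
// just as a precision drop must.
func rescale(v int64, from, to uint) int64 {
	for ; from < to; from++ {
		v *= 10
	}
	for ; from > to; from-- {
		v /= 10
	}
	return v
}

func main() {
	// a token with 12 decimals -> Fixed8: 1.0 becomes 100_000_000 units
	fmt.Println(rescale(1_000_000_000_000, 12, 8))
}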
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go
deleted file mode 100644
index 5334b9a1f..000000000
--- a/pkg/innerring/processors/container/common.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package container
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"errors"
-	"fmt"
-
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-var (
-	errWrongSessionVerb = errors.New("wrong token verb")
-	errWrongCID         = errors.New("wrong container ID")
-)
-
-type signatureVerificationData struct {
-	ownerContainer user.ID
-
-	verb session.ContainerVerb
-
-	idContainerSet bool
-	idContainer    cid.ID
-
-	binTokenSession []byte
-
-	binPublicKey []byte
-
-	signature []byte
-
-	signedData []byte
-}
-
-// verifySignature is a common method of Container service authentication. Asserts that:
-//   - for trusted parties: session is valid (*) and issued by container owner
-//   - operation data is signed by container owner or trusted party
-//   - operation data signature is correct
-//
-// (*) includes:
-//   - session token decodes correctly
-//   - signature is valid
-//   - session issued by the container owner
-//   - v.binPublicKey is a public session key
-//   - session context corresponds to the container and verb in v
-//   - session is "alive"
-func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
-	var err error
-	var key frostfsecdsa.PublicKeyRFC6979
-	keyProvided := v.binPublicKey != nil
-
-	if keyProvided {
-		err = key.Decode(v.binPublicKey)
-		if err != nil {
-			return fmt.Errorf("decode public key: %w", err)
-		}
-	}
-
-	if len(v.binTokenSession) > 0 {
-		return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
-	}
-
-	if keyProvided {
-		var idFromKey user.ID
-		user.IDFromKey(&idFromKey, (ecdsa.PublicKey)(key))
-
-		if v.ownerContainer.Equals(idFromKey) {
-			if key.Verify(v.signedData, v.signature) {
-				return nil
-			}
-
-			return errors.New("invalid signature calculated by container owner's key")
-		}
-	}
-
-	return errors.New("signature is invalid or calculated with the key not bound to the container owner")
-}
-
-func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
-	curEpoch, err := cp.netState.Epoch(ctx)
-	if err != nil {
-		return fmt.Errorf("could not read current epoch: %w", err)
-	}
-
-	if token.InvalidAt(curEpoch) {
-		return fmt.Errorf("token is not valid at %d", curEpoch)
-	}
-
-	return nil
-}
-
-func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
-	var tok session.Container
-
-	err := tok.Unmarshal(v.binTokenSession)
-	if err != nil {
-		return fmt.Errorf("decode session token: %w", err)
-	}
-
-	if !tok.VerifySignature() {
-		return errors.New("invalid session token signature")
-	}
-
-	if keyProvided && !tok.AssertAuthKey(key) {
-		return errors.New("signed with a non-session key")
-	}
-
-	if !tok.AssertVerb(v.verb) {
-		return errWrongSessionVerb
-	}
-
-	if v.idContainerSet && !tok.AppliedTo(v.idContainer) {
-		return errWrongCID
-	}
-
-	if !session.IssuedBy(tok, v.ownerContainer) {
-		return errors.New("owner differs with token owner")
-	}
-
-	err = cp.checkTokenLifetime(ctx, tok)
-	if err != nil {
-		return fmt.Errorf("check session lifetime: %w", err)
-	}
-
-	if !tok.VerifySessionDataSignature(v.signedData, v.signature) {
-		return errors.New("invalid signature calculated with session key")
-	}
-
-	return nil
-}
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
deleted file mode 100644
index bb038a3cb..000000000
--- a/pkg/innerring/processors/container/handlers.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package container
-
-import (
-	"context"
-	"crypto/sha256"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
-	"github.com/mr-tron/base58"
-	"go.uber.org/zap"
-)
-
-func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
-	put := ev.(putEvent)
-
-	id := sha256.Sum256(put.Container())
-	cp.log.Info(ctx, logs.Notification,
-		zap.String("type", "container put"),
-		zap.String("id", base58.Encode(id[:])))
-
-	// send an event to the worker pool
-
-	err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
-		return cp.processContainerPut(ctx, put)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
-			zap.Int("capacity", cp.pool.Cap()))
-	}
-}
-
-func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
-	del := ev.(containerEvent.Delete)
-	cp.log.Info(ctx, logs.Notification,
-		zap.String("type", "container delete"),
-		zap.String("id", base58.Encode(del.ContainerID())))
-
-	// send an event to the worker pool
-
-	err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
-		return cp.processContainerDelete(ctx, del)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
-			zap.Int("capacity", cp.pool.Cap()))
-	}
-}
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
deleted file mode 100644
index 1b3842eb0..000000000
--- a/pkg/innerring/processors/container/handlers_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package container
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"encoding/hex"
-	"testing"
-	"time"
-
-	frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
-	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
-	containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
-	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/network/payload"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-)
-
-func TestPutEvent(t *testing.T) {
-	t.Parallel()
-	nst := &testNetworkState{
-		homHashDisabled: true,
-		epoch:           100,
-	}
-	mc := &testMorphClient{}
-
-	proc, err := New(&Params{
-		Log:             test.NewLogger(t),
-		PoolSize:        2,
-		AlphabetState:   &testAlphabetState{isAlphabet: true},
-		NetworkState:    nst,
-		ContainerClient: &testContainerClient{},
-		MorphClient:     mc,
-		FrostFSIDClient: &testFrostFSIDClient{},
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	p, err := keys.NewPrivateKey()
-	require.NoError(t, err)
-	var usr user.ID
-	user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey()))
-
-	var pp netmap.PlacementPolicy
-	pp.AddReplicas(netmap.ReplicaDescriptor{})
-
-	var cnr containerSDK.Container
-	cnr.Init()
-	cnr.SetOwner(usr)
-	cnr.SetPlacementPolicy(pp)
-	cnr.SetBasicACL(acl.Private)
-	containerSDK.DisableHomomorphicHashing(&cnr)
-
-	nr := &payload.P2PNotaryRequest{
-		MainTransaction: &transaction.Transaction{},
-	}
-
-	event := &testPutEvent{
-		cnr: &cnr,
-		pk:  p,
-		st:  nil,
-		nr:  nr,
-	}
-
-	proc.handlePut(context.Background(), event)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
-}
-
-func TestDeleteEvent(t *testing.T) {
-	t.Parallel()
-	nst := &testNetworkState{
-		homHashDisabled: true,
-		epoch:           100,
-	}
-	cc := &testContainerClient{
-		get: make(map[string]*containercore.Container),
-	}
-
-	p, err := keys.NewPrivateKey()
-	require.NoError(t, err)
-
-	mc := &testMorphClient{}
-
-	proc, err := New(&Params{
-		Log:             test.NewLogger(t),
-		PoolSize:        2,
-		AlphabetState:   &testAlphabetState{isAlphabet: true},
-		NetworkState:    nst,
-		ContainerClient: cc,
-		MorphClient:     mc,
-		FrostFSIDClient: &testFrostFSIDClient{},
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	var usr user.ID
-	user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey()))
-
-	var pp netmap.PlacementPolicy
-	pp.AddReplicas(netmap.ReplicaDescriptor{})
-
-	var cnr containerSDK.Container
-	cnr.Init()
-	cnr.SetOwner(usr)
-	cnr.SetPlacementPolicy(pp)
-	cnr.SetBasicACL(acl.Private)
-	containerSDK.DisableHomomorphicHashing(&cnr)
-
-	var cid cid.ID
-	containerSDK.CalculateID(&cid, cnr)
-	cidBin := make([]byte, 32)
-	cid.Encode(cidBin)
-
-	nr := &payload.P2PNotaryRequest{
-		MainTransaction: &transaction.Transaction{},
-	}
-
-	ev := containerEvent.Delete{
-		ContainerIDValue:   cidBin,
-		SignatureValue:     p.Sign(cidBin),
-		NotaryRequestValue: nr,
-		PublicKeyValue:     p.PublicKey().Bytes(),
-	}
-
-	var signature frostfscrypto.Signature
-	signer := frostfsecdsa.Signer(p.PrivateKey)
-	require.NoError(t, signature.Calculate(signer, ev.ContainerID()), "failed to calculate signature")
-	cc.get[hex.EncodeToString(ev.ContainerID())] = &containercore.Container{
-		Value:     cnr,
-		Signature: signature,
-	}
-
-	proc.handleDelete(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var expectedDelete cntClient.DeletePrm
-	expectedDelete.SetCID(ev.ContainerID())
-	expectedDelete.SetSignature(ev.Signature())
-
-	require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
-}
-
-type testAlphabetState struct {
-	isAlphabet bool
-}
-
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
-	return s.isAlphabet
-}
-
-type testNetworkState struct {
-	homHashDisabled bool
-	epoch           uint64
-}
-
-func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
-	return s.homHashDisabled, nil
-}
-
-func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
-	return s.epoch, nil
-}
-
-type testContainerClient struct {
-	contractAddress util.Uint160
-	get             map[string]*containercore.Container
-}
-
-func (c *testContainerClient) ContractAddress() util.Uint160 {
-	return c.contractAddress
-}
-
-func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
-	key := hex.EncodeToString(cid)
-	if cont, found := c.get[key]; found {
-		return cont, nil
-	}
-	return nil, new(apistatus.ContainerNotFound)
-}
-
-var _ putEvent = &testPutEvent{}
-
-type testPutEvent struct {
-	cnr *containerSDK.Container
-	pk  *keys.PrivateKey
-	st  []byte
-	nr  *payload.P2PNotaryRequest
-}
-
-func (e *testPutEvent) MorphEvent() {}
-
-func (e *testPutEvent) Container() []byte {
-	return e.cnr.Marshal()
-}
-
-func (e *testPutEvent) PublicKey() []byte {
-	return e.pk.PublicKey().Bytes()
-}
-
-func (e *testPutEvent) Signature() []byte {
-	return e.pk.Sign(e.cnr.Marshal())
-}
-
-func (e *testPutEvent) SessionToken() []byte {
-	return e.st
-}
-
-func (e *testPutEvent) NotaryRequest() *payload.P2PNotaryRequest {
-	return e.nr
-}
-
-type testMorphClient struct {
-	transactions []*transaction.Transaction
-}
-
-func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
-	c.transactions = append(c.transactions, mainTx)
-	return nil
-}
-
-type testFrostFSIDClient struct{}
-
-func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
-	return &frostfsidclient.Subject{}, nil
-}
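handlePut above derives the logged container ID as the SHA-256 of the marshaled container, rendered in base58. A tiny self-contained sketch of that derivation (the input bytes here are a placeholder, not a real marshaled container):

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/mr-tron/base58"
)

func main() {
	// binCnr stands in for put.Container(), the marshaled container
	binCnr := []byte("marshaled container bytes")

	id := sha256.Sum256(binCnr)
	fmt.Println(base58.Encode(id[:])) // the "id" field logged by handlePut
}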
-func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
-	if !cp.alphabetState.IsAlphabet(ctx) {
-		cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
-		return true
-	}
-
-	pctx := &putContainerContext{
-		e: put,
-	}
-
-	err := cp.checkPutContainer(ctx, pctx)
-	if err != nil {
-		cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
-			zap.Error(err),
-		)
-
-		return false
-	}
-
-	if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
-			zap.Error(err),
-		)
-		return false
-	}
-
-	return true
-}
-
-func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
-	binCnr := pctx.e.Container()
-	var cnr containerSDK.Container
-
-	err := cnr.Unmarshal(binCnr)
-	if err != nil {
-		return fmt.Errorf("invalid binary container: %w", err)
-	}
-
-	err = cp.verifySignature(ctx, signatureVerificationData{
-		ownerContainer:  cnr.Owner(),
-		verb:            session.VerbContainerPut,
-		binTokenSession: pctx.e.SessionToken(),
-		binPublicKey:    pctx.e.PublicKey(),
-		signature:       pctx.e.Signature(),
-		signedData:      binCnr,
-	})
-	if err != nil {
-		return fmt.Errorf("auth container creation: %w", err)
-	}
-
-	// check homomorphic hashing setting
-	err = checkHomomorphicHashing(ctx, cp.netState, cnr)
-	if err != nil {
-		return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
-	}
-
-	// check native name and zone
-	err = cp.checkNNS(ctx, pctx, cnr)
-	if err != nil {
-		return fmt.Errorf("NNS: %w", err)
-	}
-
-	return nil
-}
-
-// Process a delete container operation from the user by checking container
-// sanity and sending an approve tx back to the morph.
-func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
-	if !cp.alphabetState.IsAlphabet(ctx) {
-		cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
-		return true
-	}
-
-	err := cp.checkDeleteContainer(ctx, e)
-	if err != nil {
-		cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
-			zap.Error(err),
-		)
-
-		return false
-	}
-
-	if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
-			zap.Error(err),
-		)
-
-		return false
-	}
-
-	return true
-}
-
-func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
-	binCnr := e.ContainerID()
-
-	var idCnr cid.ID
-
-	err := idCnr.Decode(binCnr)
-	if err != nil {
-		return fmt.Errorf("invalid container ID: %w", err)
-	}
-
-	// receive owner of the related container
-	cnr, err := cp.cnrClient.Get(ctx, binCnr)
-	if err != nil {
-		return fmt.Errorf("could not receive the container: %w", err)
-	}
-
-	err = cp.verifySignature(ctx, signatureVerificationData{
-		ownerContainer:  cnr.Value.Owner(),
-		verb:            session.VerbContainerDelete,
-		idContainerSet:  true,
-		idContainer:     idCnr,
-		binTokenSession: e.SessionToken(),
-		signature:       e.Signature(),
-		signedData:      binCnr,
-		binPublicKey:    e.PublicKeyValue,
-	})
-	if err != nil {
-		return fmt.Errorf("auth container removal: %w", err)
-	}
-
-	return nil
-}
-
-func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
-	// fetch domain info
-	pctx.d = containerSDK.ReadDomain(cnr)
-
-	// if PutNamed event => check if values in container correspond to args
-	if named, ok := pctx.e.(interface {
-		Name() string
-		Zone() string
-	}); ok {
-		if name := named.Name(); name != pctx.d.Name() {
-			return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
-		}
-
-		if zone := named.Zone(); zone != pctx.d.Zone() {
-			return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
-		}
-	}
-
-	addr, err := util.Uint160DecodeBytesBE(cnr.Owner().WalletBytes()[1 : 1+util.Uint160Size])
-	if err != nil {
-		return fmt.Errorf("could not get container owner address: %w", err)
-	}
-
-	subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
-	if err != nil {
-		return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
-	}
-
-	namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
-	if !hasNamespace {
-		return nil
-	}
-
-	if subject.Namespace != namespace {
-		return errContainerAndOwnerNamespaceDontMatch
-	}
-
-	return nil
-}
-
-func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
-	netSetting, err := ns.HomomorphicHashDisabled(ctx)
-	if err != nil {
-		return fmt.Errorf("could not get setting in contract: %w", err)
-	}
-
-	if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
-		return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
-	}
-
-	return nil
-}
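checkNNS above only enforces the namespace match when the container's zone carries the ".ns" suffix; strings.CutSuffix both tests for and strips the suffix in one call. A minimal sketch of that idiom (the zone values are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// namespaced zone: the part before ".ns" must match the owner's namespace
	if namespace, ok := strings.CutSuffix("tenant1.ns", ".ns"); ok {
		fmt.Println("check namespace:", namespace) // "tenant1"
	}

	// a zone without the suffix skips the namespace check entirely
	_, ok := strings.CutSuffix("container", ".ns")
	fmt.Println(ok) // false
}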
-	Epoch(ctx context.Context) (uint64, error)
-
-	// HomomorphicHashDisabled must return boolean that
-	// represents homomorphic network state:
-	//   * true if hashing is disabled;
-	//   * false if hashing is enabled.
-	//
-	// Must return any error encountered
-	// which did not allow reading the value.
-	HomomorphicHashDisabled(ctx context.Context) (bool, error)
-}
-
-// New creates a container contract processor instance.
-func New(p *Params) (*Processor, error) {
-	switch {
-	case p.Log == nil:
-		return nil, errors.New("ir/container: logger is not set")
-	case p.AlphabetState == nil:
-		return nil, errors.New("ir/container: global state is not set")
-	case p.ContainerClient == nil:
-		return nil, errors.New("ir/container: Container client is not set")
-	case p.MorphClient == nil:
-		return nil, errors.New("ir/container: Morph client is not set")
-	case p.NetworkState == nil:
-		return nil, errors.New("ir/container: network state is not set")
-	case p.FrostFSIDClient == nil:
-		return nil, errors.New("ir/container: FrostFSID client is not set")
-	}
-
-	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
-	if err != nil {
-		return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
-	}
-
-	metricsRegister := p.Metrics
-	if metricsRegister == nil {
-		metricsRegister = metrics.DefaultRegister{}
-	}
-
-	return &Processor{
-		log:             p.Log,
-		metrics:         metricsRegister,
-		pool:            pool,
-		alphabetState:   p.AlphabetState,
-		cnrClient:       p.ContainerClient,
-		netState:        p.NetworkState,
-		morphClient:     p.MorphClient,
-		frostFSIDClient: p.FrostFSIDClient,
-	}, nil
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
-	return nil
-}
-
-// ListenerNotaryParsers for the 'event.Listener' notary event producer.
-func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
-	var (
-		p event.NotaryParserInfo
-
-		pp = make([]event.NotaryParserInfo, 0, 4)
-	)
-
-	p.SetMempoolType(mempoolevent.TransactionAdded)
-	p.SetScriptHash(cp.cnrClient.ContractAddress())
-
-	// container put
-	p.SetRequestType(containerEvent.PutNotaryEvent)
-	p.SetParser(containerEvent.ParsePutNotary)
-	pp = append(pp, p)
-
-	// container named put
-	p.SetRequestType(containerEvent.PutNamedNotaryEvent)
-	p.SetParser(containerEvent.ParsePutNamedNotary)
-	pp = append(pp, p)
-
-	// container delete
-	p.SetRequestType(containerEvent.DeleteNotaryEvent)
-	p.SetParser(containerEvent.ParseDeleteNotary)
-	pp = append(pp, p)
-
-	return pp
-}
-
-// ListenerNotaryHandlers for the 'event.Listener' notary event producer.
-func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
-	var (
-		h event.NotaryHandlerInfo
-
-		hh = make([]event.NotaryHandlerInfo, 0, 4)
-	)
-
-	h.SetScriptHash(cp.cnrClient.ContractAddress())
-	h.SetMempoolType(mempoolevent.TransactionAdded)
-
-	// container put
-	h.SetRequestType(containerEvent.PutNotaryEvent)
-	h.SetHandler(cp.handlePut)
-	hh = append(hh, h)
-
-	// container named put (same handler)
-	h.SetRequestType(containerEvent.PutNamedNotaryEvent)
-	hh = append(hh, h)
-
-	// container delete
-	h.SetRequestType(containerEvent.DeleteNotaryEvent)
-	h.SetHandler(cp.handleDelete)
-	hh = append(hh, h)
-
-	return hh
-}
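The registration methods above rely on a Go subtlety: one struct value is mutated and appended repeatedly, and this is safe because append copies the value each time, so shared fields (script hash, mempool type) are set once and only the per-event fields are overwritten between appends. A standalone sketch of that pattern, with a hypothetical value type standing in for event.NotaryParserInfo:

package main

import "fmt"

// info is a hypothetical stand-in for event.NotaryParserInfo.
type info struct {
	requestType string
	handler     string
}

func main() {
	var (
		i  info
		ii = make([]info, 0, 3)
	)

	i.handler = "handlePut" // shared field, set once

	i.requestType = "put"
	ii = append(ii, i)

	i.requestType = "putNamed" // same handler, new request type
	ii = append(ii, i)

	i.requestType = "delete"
	i.handler = "handleDelete"
	ii = append(ii, i)

	fmt.Println(ii) // three independent copies, not three views of i
}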
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
deleted file mode 100644
index 936de2e77..000000000
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package frostfs
-
-import (
-	"bytes"
-	"context"
-	"encoding/hex"
-	"slices"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
-	"go.uber.org/zap"
-)
-
-func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
-	deposit := ev.(frostfsEvent.Deposit)
-	depositIDBin := bytes.Clone(deposit.ID())
-	slices.Reverse(depositIDBin)
-	np.log.Info(ctx, logs.Notification,
-		zap.String("type", "deposit"),
-		zap.String("id", hex.EncodeToString(depositIDBin)))
-
-	// send event to the worker pool
-
-	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
-		return np.processDeposit(ctx, deposit)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
-			zap.Int("capacity", np.pool.Cap()))
-	}
-}
-
-func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
-	withdraw := ev.(frostfsEvent.Withdraw)
-	withdrawBin := bytes.Clone(withdraw.ID())
-	slices.Reverse(withdrawBin)
-	np.log.Info(ctx, logs.Notification,
-		zap.String("type", "withdraw"),
-		zap.String("id", hex.EncodeToString(withdrawBin)))
-
-	// send event to the worker pool
-
-	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
-		return np.processWithdraw(ctx, withdraw)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
-			zap.Int("capacity", np.pool.Cap()))
-	}
-}
-
-func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
-	cheque := ev.(frostfsEvent.Cheque)
-	np.log.Info(ctx, logs.Notification,
-		zap.String("type", "cheque"),
-		zap.String("id", hex.EncodeToString(cheque.ID())))
-
-	// send event to the worker pool
-
-	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
-		return np.processCheque(ctx, cheque)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
-			zap.Int("capacity", np.pool.Cap()))
-	}
-}
-
-func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
-	cfg := ev.(frostfsEvent.Config)
-	np.log.Info(ctx, logs.Notification,
-		zap.String("type", "set config"),
-		zap.String("key", hex.EncodeToString(cfg.Key())),
-		zap.String("value", hex.EncodeToString(cfg.Value())))
-
-	// send event to the worker pool
-
-	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
-		return np.processConfig(ctx, cfg)
-	})
-	if err != nil {
-		// the system can be moved into a controlled degradation stage
-		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
-			zap.Int("capacity", np.pool.Cap()))
-	}
-}
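handleDeposit and handleWithdraw above clone the event ID before reversing it, so the little-endian display form never mutates the slice the event still owns. A tiny stdlib-only sketch of that clone-then-reverse idiom:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"slices"
)

func main() {
	id := []byte{0x01, 0x02, 0x03} // stands in for deposit.ID()

	// clone first: the event retains the original byte order
	le := bytes.Clone(id)
	slices.Reverse(le)

	fmt.Println(hex.EncodeToString(le)) // "030201"
	fmt.Println(hex.EncodeToString(id)) // "010203", untouched
}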
diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go
deleted file mode 100644
index 72310f6f9..000000000
--- a/pkg/innerring/processors/frostfs/handlers_test.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package frostfs
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
-	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-)
-
-func TestHandleDeposit(t *testing.T) {
-	t.Parallel()
-	es := &testEpochState{
-		epochCounter: 100,
-	}
-	b := &testBalaceClient{}
-	m := &testMorphClient{
-		balance: 150,
-	}
-	proc, err := newTestProc(t, func(p *Params) {
-		p.EpochState = es
-		p.BalanceClient = b
-		p.MorphClient = m
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	ev := frostfsEvent.Deposit{
-		IDValue:     []byte{1, 2, 3, 4, 5},
-		FromValue:   util.Uint160{100},
-		ToValue:     util.Uint160{200},
-		AmountValue: 1000,
-	}
-
-	proc.handleDeposit(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var expMint balance.MintPrm
-	expMint.SetAmount(ev.AmountValue)
-	expMint.SetID(ev.IDValue)
-	expMint.SetTo(ev.ToValue)
-
-	require.EqualValues(t, []balance.MintPrm{expMint}, b.mint, "invalid mint value")
-	require.EqualValues(t, []transferGas{
-		{
-			receiver: ev.ToValue,
-			amount:   fixedn.Fixed8(50),
-		},
-	}, m.transferGas, "invalid transfer gas")
-
-	es.epochCounter = 109
-
-	proc.handleDeposit(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	expMint.SetAmount(ev.AmountValue)
-	expMint.SetID(ev.IDValue)
-	expMint.SetTo(ev.ToValue)
-
-	require.EqualValues(t, []balance.MintPrm{expMint, expMint}, b.mint, "invalid mint value")
-	require.EqualValues(t, []transferGas{
-		{
-			receiver: ev.ToValue,
-			amount:   fixedn.Fixed8(50),
-		},
-	}, m.transferGas, "invalid transfer gas")
-}
-
-func TestHandleWithdraw(t *testing.T) {
-	t.Parallel()
-	es := &testEpochState{
-		epochCounter: 100,
-	}
-	b := &testBalaceClient{}
-	m := &testMorphClient{
-		balance: 150,
-	}
-	proc, err := newTestProc(t, func(p *Params) {
-		p.EpochState = es
-		p.BalanceClient = b
-		p.MorphClient = m
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	ev := frostfsEvent.Withdraw{
-		IDValue:     []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
-		UserValue:   util.Uint160{100},
-		AmountValue: 1000,
-	}
-
-	proc.handleWithdraw(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	lock, err := util.Uint160DecodeBytesBE(ev.ID()[:util.Uint160Size])
-	require.NoError(t, err, "failed to decode ID")
-	var expLock balance.LockPrm
-	expLock.SetAmount(ev.AmountValue)
-	expLock.SetID(ev.IDValue)
-	expLock.SetDueEpoch(int64(es.epochCounter) + int64(lockAccountLifetime))
-	expLock.SetLock(lock)
-	expLock.SetUser(ev.UserValue)
-
-	require.EqualValues(t, []balance.LockPrm{expLock}, b.lock, "invalid lock value")
-}
-
-func TestHandleCheque(t *testing.T) {
-	t.Parallel()
-	es := &testEpochState{
-		epochCounter: 100,
-	}
-	b := &testBalaceClient{}
-	m := &testMorphClient{
-		balance: 150,
-	}
-	proc, err := newTestProc(t, func(p *Params) {
-		p.BalanceClient = b
-		p.MorphClient = m
-		p.EpochState = es
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	ev := frostfsEvent.Cheque{
-		IDValue:     []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
-		UserValue:   util.Uint160{100},
-		AmountValue: 1000,
-		LockValue:   util.Uint160{200},
-	}
-
-	proc.handleCheque(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var expBurn balance.BurnPrm
-	expBurn.SetAmount(ev.AmountValue)
-	expBurn.SetID(ev.IDValue)
-	expBurn.SetTo(util.Uint160{200})
-
-	require.EqualValues(t, []balance.BurnPrm{expBurn}, b.burn, "invalid burn value")
-}
-
-func TestHandleConfig(t *testing.T) {
-	t.Parallel()
-	es := &testEpochState{
-		epochCounter: 100,
-	}
-	nm := &testNetmapClient{}
-	m := &testMorphClient{
-		balance: 150,
-	}
-	proc, err := newTestProc(t, func(p *Params) {
-		p.NetmapClient = nm
-		p.MorphClient = m
-		p.EpochState = es
-	})
-	require.NoError(t, err, "failed to create processor")
-
-	ev := frostfsEvent.Config{
-		IDValue:     []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
-		KeyValue:    []byte{1, 2, 3, 4, 5},
-		ValueValue:  []byte{6, 7, 8, 9, 0},
-		TxHashValue: util.Uint256{100},
-	}
-
-	proc.handleConfig(context.Background(), ev)
-
-	for proc.pool.Running() > 0 {
-		time.Sleep(10 * time.Millisecond)
-	}
-
-	var expConfig nmClient.SetConfigPrm
-	expConfig.SetHash(ev.TxHashValue)
-	expConfig.SetID(ev.IDValue)
-	expConfig.SetKey(ev.KeyValue)
-	expConfig.SetValue(ev.ValueValue)
-
-	require.EqualValues(t, []nmClient.SetConfigPrm{expConfig}, nm.config, "invalid config value")
-}
-
-func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
-	p := &Params{
-		Log:                 test.NewLogger(t),
-		PoolSize:            1,
-		FrostFSContract:     util.Uint160{0},
-		BalanceClient:       &testBalaceClient{},
-		NetmapClient:        &testNetmapClient{},
-		MorphClient:         &testMorphClient{},
-		EpochState:          &testEpochState{},
-		AlphabetState:       &testAlphabetState{isAlphabet: true},
-		Converter:           &testPrecisionConverter{},
-		MintEmitCacheSize:   100,
-		MintEmitThreshold:   10,
-		MintEmitValue:       fixedn.Fixed8(50),
-		GasBalanceThreshold: 50,
-	}
-
-	nonDefault(p)
-
-	return New(p)
-}
-
-type testEpochState struct {
-	epochCounter uint64
-}
-
-func (s *testEpochState) EpochCounter() uint64 {
-	return s.epochCounter
-}
-
-type testAlphabetState struct {
-	isAlphabet bool
-}
-
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
-	return s.isAlphabet
-}
-
-type testPrecisionConverter struct{}
-
-func (c *testPrecisionConverter) ToBalancePrecision(v int64) int64 {
-	return v
-}
-
-type testBalaceClient struct {
-	mint []balance.MintPrm
-	lock []balance.LockPrm
-	burn []balance.BurnPrm
-}
-
-func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
-	c.mint = append(c.mint, p)
-	return nil
-}
-
-func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
-	c.lock = append(c.lock, p)
-	return nil
-}
-
-func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
-	c.burn = append(c.burn, p)
-	return nil
-}
-
-type testNetmapClient struct {
-	config []nmClient.SetConfigPrm
-}
-
-func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
-	c.config = append(c.config, p)
-	return nil
-}
-
-type transferGas struct {
-	receiver util.Uint160
-	amount   fixedn.Fixed8
-}
-
-type testMorphClient struct {
-	balance     int64
-	transferGas []transferGas
-}
-
-func (c *testMorphClient) GasBalance() (res int64, err error) {
-	return c.balance, nil
-}
-
-func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
-	c.transferGas = append(c.transferGas, transferGas{
-		receiver: receiver,
-		amount:   amount,
-	})
-	return nil
-}
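TestHandleDeposit above exercises the double-emission guard: the second deposit at epoch 109 mints again but does not transfer GAS again, because 109 is still within the 10-epoch threshold of the last emission. A minimal sketch of that guard, assuming only the public hashicorp/golang-lru/v2 API (names here are illustrative; the real logic in processDeposit below also checks the node's own GAS balance):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	const mintEmitThreshold = 10 // epochs, mirroring newTestProc above

	cache, err := lru.New[string, uint64](100)
	if err != nil {
		panic(err)
	}

	emit := func(receiver string, curEpoch uint64) bool {
		if last, ok := cache.Get(receiver); ok && last+mintEmitThreshold >= curEpoch {
			return false // double emission declined, cache left as is
		}
		cache.Add(receiver, curEpoch)
		return true
	}

	fmt.Println(emit("receiver", 100)) // true: first emission
	fmt.Println(emit("receiver", 109)) // false: 100+10 >= 109
	fmt.Println(emit("receiver", 111)) // true: 100+10 < 111
}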
-func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool { - if !np.alphabetState.IsAlphabet(ctx) { - np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw) - return true - } - - // create lock account - lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) - if err != nil { - np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err)) - return false - } - - curEpoch := np.epochState.EpochCounter() - - prm := balance.LockPrm{} - - prm.SetID(withdraw.ID()) - prm.SetUser(withdraw.User()) - prm.SetLock(lock) - prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount())) - prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime)) - - err = np.balanceClient.Lock(ctx, prm) - if err != nil { - np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) - return false - } - - return true -} - -// Process cheque event by transferring assets from the lock account back to -// the reserve account. -func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool { - if !np.alphabetState.IsAlphabet(ctx) { - np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque) - return true - } - - prm := balance.BurnPrm{} - - prm.SetTo(cheque.LockAccount()) - prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount())) - prm.SetID(cheque.ID()) - - err := np.balanceClient.Burn(ctx, prm) - if err != nil { - np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) - return false - } - - return true -} diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go deleted file mode 100644 index dc579f6bb..000000000 --- a/pkg/innerring/processors/frostfs/process_config.go +++ /dev/null @@ -1,34 +0,0 @@ -package frostfs - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" - "go.uber.org/zap" -) - -// Process config event by setting configuration value from the mainchain in -// the sidechain. 
-func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool { - if !np.alphabetState.IsAlphabet(ctx) { - np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig) - return true - } - - prm := nmClient.SetConfigPrm{} - - prm.SetID(config.ID()) - prm.SetKey(config.Key()) - prm.SetValue(config.Value()) - prm.SetHash(config.TxHash()) - - err := np.netmapClient.SetConfig(ctx, prm) - if err != nil { - np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) - return false - } - - return true -} diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go deleted file mode 100644 index 9d3bf65cd..000000000 --- a/pkg/innerring/processors/frostfs/processor.go +++ /dev/null @@ -1,183 +0,0 @@ -package frostfs - -import ( - "context" - "errors" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - lru "github.com/hashicorp/golang-lru/v2" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" -) - -type ( - // EpochState is a callback interface for inner ring global state. - EpochState interface { - EpochCounter() uint64 - } - - // AlphabetState is a callback interface for inner ring global state. - AlphabetState interface { - IsAlphabet(context.Context) bool - } - - // PrecisionConverter converts balance amount values. - PrecisionConverter interface { - ToBalancePrecision(int64) int64 - } - - BalanceClient interface { - Mint(ctx context.Context, p balance.MintPrm) error - Lock(ctx context.Context, p balance.LockPrm) error - Burn(ctx context.Context, p balance.BurnPrm) error - } - - NetmapClient interface { - SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error - } - - MorphClient interface { - GasBalance() (res int64, err error) - TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error - } - - // Processor of events produced by frostfs contract in main net. - Processor struct { - log *logger.Logger - metrics metrics.Register - pool *ants.Pool - frostfsContract util.Uint160 - balanceClient BalanceClient - netmapClient NetmapClient - morphClient MorphClient - epochState EpochState - alphabetState AlphabetState - converter PrecisionConverter - mintEmitLock sync.Mutex - mintEmitCache *lru.Cache[string, uint64] - mintEmitThreshold uint64 - mintEmitValue fixedn.Fixed8 - gasBalanceThreshold int64 - } - - // Params of the processor constructor. - Params struct { - Log *logger.Logger - Metrics metrics.Register - PoolSize int - FrostFSContract util.Uint160 - BalanceClient BalanceClient - NetmapClient NetmapClient - MorphClient MorphClient - EpochState EpochState - AlphabetState AlphabetState - Converter PrecisionConverter - MintEmitCacheSize int - MintEmitThreshold uint64 // in epochs - MintEmitValue fixedn.Fixed8 - GasBalanceThreshold int64 - } -) - -const ( - depositNotification = "Deposit" - withdrawNotification = "Withdraw" - chequeNotification = "Cheque" - configNotification = "SetConfig" -) - -// New creates frostfs mainnet contract processor instance. 
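These processors share one scheduling idiom, set up in New below: an ants pool created with WithNonblocking(true). When the pool is saturated, Submit fails immediately instead of queueing, and the caller logs a "worker pool drained" warning; events are dropped rather than back-pressuring the chain listener, which is the controlled-degradation behavior the comments refer to. A self-contained sketch of the idiom (the task body is a placeholder):

import (
	"fmt"

	"github.com/panjf2000/ants/v2"
)

func submitOne() error {
	pool, err := ants.NewPool(1, ants.WithNonblocking(true))
	if err != nil {
		return err
	}
	defer pool.Release()

	if err := pool.Submit(func() { /* handle one event */ }); err != nil {
		// With a nonblocking pool, a saturated pool returns
		// ants.ErrPoolOverload here instead of blocking the caller.
		return fmt.Errorf("worker pool drained: %w", err)
	}
	return nil
}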
-func New(p *Params) (*Processor, error) { - switch { - case p.Log == nil: - return nil, errors.New("ir/frostfs: logger is not set") - case p.MorphClient == nil: - return nil, errors.New("ir/frostfs: neo:morph client is not set") - case p.EpochState == nil: - return nil, errors.New("ir/frostfs: global state is not set") - case p.AlphabetState == nil: - return nil, errors.New("ir/frostfs: global state is not set") - case p.Converter == nil: - return nil, errors.New("ir/frostfs: balance precision converter is not set") - } - - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) - if err != nil { - return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) - } - - lruCache, err := lru.New[string, uint64](p.MintEmitCacheSize) - if err != nil { - return nil, fmt.Errorf("ir/frostfs: can't create LRU cache for gas emission: %w", err) - } - - metricsRegister := p.Metrics - if metricsRegister == nil { - metricsRegister = metrics.DefaultRegister{} - } - - return &Processor{ - log: p.Log, - metrics: metricsRegister, - pool: pool, - frostfsContract: p.FrostFSContract, - balanceClient: p.BalanceClient, - netmapClient: p.NetmapClient, - morphClient: p.MorphClient, - epochState: p.EpochState, - alphabetState: p.AlphabetState, - converter: p.Converter, - mintEmitCache: lruCache, - mintEmitThreshold: p.MintEmitThreshold, - mintEmitValue: p.MintEmitValue, - gasBalanceThreshold: p.GasBalanceThreshold, - }, nil -} - -// ListenerNotificationHandlers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - return []event.NotificationHandlerInfo{ - { - Contract: np.frostfsContract, - Type: event.TypeFromString(depositNotification), - Parser: frostfsEvent.ParseDeposit, - Handlers: []event.Handler{np.handleDeposit}, - }, - { - Contract: np.frostfsContract, - Type: event.TypeFromString(withdrawNotification), - Parser: frostfsEvent.ParseWithdraw, - Handlers: []event.Handler{np.handleWithdraw}, - }, - { - Contract: np.frostfsContract, - Type: event.TypeFromString(chequeNotification), - Parser: frostfsEvent.ParseCheque, - Handlers: []event.Handler{np.handleCheque}, - }, - { - Contract: np.frostfsContract, - Type: event.TypeFromString(configNotification), - Parser: frostfsEvent.ParseConfig, - Handlers: []event.Handler{np.handleConfig}, - }, - } -} - -// ListenerNotaryParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { - return nil -} - -// ListenerNotaryHandlers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/governance/events.go b/pkg/innerring/processors/governance/events.go deleted file mode 100644 index 76cfddc07..000000000 --- a/pkg/innerring/processors/governance/events.go +++ /dev/null @@ -1,26 +0,0 @@ -package governance - -import "github.com/nspcc-dev/neo-go/pkg/util" - -// Sync is an event to start governance synchronization. -type Sync struct { - // txHash is used in notary environmental - // for calculating unique but same for - // all notification receivers values. - txHash util.Uint256 -} - -// TxHash returns hash of the TX that triggers -// synchronization process. -func (s Sync) TxHash() util.Uint256 { - return s.txHash -} - -// MorphEvent implements Event interface. -func (s Sync) MorphEvent() {} - -// NewSyncEvent creates Sync event that was produced -// in transaction with txHash hash. 
-func NewSyncEvent(txHash util.Uint256) Sync { - return Sync{txHash: txHash} -} diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go deleted file mode 100644 index 7e8ab629d..000000000 --- a/pkg/innerring/processors/governance/handlers.go +++ /dev/null @@ -1,49 +0,0 @@ -package governance - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" - "github.com/nspcc-dev/neo-go/pkg/core/native" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) { - var ( - typ string - hash util.Uint256 - ) - - switch et := e.(type) { - case Sync: - typ = "sync" - hash = et.TxHash() - case rolemanagement.Designate: - if et.Role != noderoles.NeoFSAlphabet { - return - } - - typ = native.DesignationEventName - hash = et.TxHash - default: - return - } - - gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ)) - - // send event to the worker pool - - err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool { - return gp.processAlphabetSync(ctx, hash) - }) - if err != nil { - // there system can be moved into controlled degradation stage - gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained, - zap.Int("capacity", gp.pool.Cap())) - } -} diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go deleted file mode 100644 index 864c5da67..000000000 --- a/pkg/innerring/processors/governance/handlers_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package governance - -import ( - "context" - "encoding/binary" - "sort" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -func TestHandleAlphabetSyncEvent(t *testing.T) { - t.Parallel() - testKeys := generateTestKeys(t) - - es := &testEpochState{ - epoch: 100, - } - as := &testAlphabetState{ - isAlphabet: true, - } - v := &testVoter{} - irf := &testIRFetcher{ - publicKeys: testKeys.sidechainKeys, - } - m := &testMorphClient{ - commiteeKeys: testKeys.sidechainKeys, - } - mn := &testMainnetClient{ - alphabetKeys: testKeys.mainnetKeys, - } - f := &testFrostFSClient{} - - proc, err := New( - &Params{ - Log: test.NewLogger(t), - EpochState: es, - AlphabetState: as, - Voter: v, - IRFetcher: irf, - MorphClient: m, - MainnetClient: mn, - FrostFSClient: f, - }, - ) - - require.NoError(t, err, "failed to create processor") - - ev := Sync{ - txHash: util.Uint256{100}, - } - - proc.HandleAlphabetSync(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.EqualValues(t, []VoteValidatorPrm{ - { - Validators: testKeys.newAlphabetExp, - Hash: &ev.txHash, - }, - }, v.votes, "invalid vote calls") - - var expAlphabetUpdate 
client.UpdateAlphabetListPrm - expAlphabetUpdate.SetHash(ev.txHash) - expAlphabetUpdate.SetList(testKeys.newInnerRingExp) - require.EqualValues(t, []client.UpdateAlphabetListPrm{expAlphabetUpdate}, m.alphabetUpdates, "invalid alphabet updates") - - var expNotaryUpdate client.UpdateNotaryListPrm - expNotaryUpdate.SetHash(ev.txHash) - expNotaryUpdate.SetList(testKeys.newAlphabetExp) - require.EqualValues(t, []client.UpdateNotaryListPrm{expNotaryUpdate}, m.notaryUpdates, "invalid notary list updates") - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, es.epoch) - - id := append([]byte(alphabetUpdateIDPrefix), buf...) - var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm - expFrostFSAlphabetUpd.SetID(id) - expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp) - - require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates") -} - -func TestHandleAlphabetDesignateEvent(t *testing.T) { - t.Parallel() - testKeys := generateTestKeys(t) - - es := &testEpochState{ - epoch: 100, - } - as := &testAlphabetState{ - isAlphabet: true, - } - v := &testVoter{} - irf := &testIRFetcher{ - publicKeys: testKeys.sidechainKeys, - } - m := &testMorphClient{ - commiteeKeys: testKeys.sidechainKeys, - } - mn := &testMainnetClient{ - alphabetKeys: testKeys.mainnetKeys, - } - f := &testFrostFSClient{} - - proc, err := New( - &Params{ - Log: test.NewLogger(t), - EpochState: es, - AlphabetState: as, - Voter: v, - IRFetcher: irf, - MorphClient: m, - MainnetClient: mn, - FrostFSClient: f, - }, - ) - - require.NoError(t, err, "failed to create processor") - - ev := rolemanagement.Designate{ - TxHash: util.Uint256{100}, - Role: noderoles.NeoFSAlphabet, - } - - proc.HandleAlphabetSync(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.EqualValues(t, []VoteValidatorPrm{ - { - Validators: testKeys.newAlphabetExp, - Hash: &ev.TxHash, - }, - }, v.votes, "invalid vote calls") - - var alpabetUpdExp client.UpdateAlphabetListPrm - alpabetUpdExp.SetList(testKeys.newInnerRingExp) - alpabetUpdExp.SetHash(ev.TxHash) - require.EqualValues(t, []client.UpdateAlphabetListPrm{alpabetUpdExp}, m.alphabetUpdates, "invalid alphabet updates") - - var expNotaryUpdate client.UpdateNotaryListPrm - expNotaryUpdate.SetList(testKeys.newAlphabetExp) - expNotaryUpdate.SetHash(ev.TxHash) - require.EqualValues(t, []client.UpdateNotaryListPrm{expNotaryUpdate}, m.notaryUpdates, "invalid notary list updates") - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, es.epoch) - - id := append([]byte(alphabetUpdateIDPrefix), buf...) - var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm - expFrostFSAlphabetUpd.SetID(id) - expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp) - - require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates") -} - -type testKeys struct { - sidechainKeys keys.PublicKeys - mainnetKeys keys.PublicKeys - newAlphabetExp keys.PublicKeys - newInnerRingExp keys.PublicKeys -} - -func generateTestKeys(t *testing.T) testKeys { - for { - var result testKeys - - for range 4 { - pk, err := keys.NewPrivateKey() - require.NoError(t, err, "failed to create private key") - result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey()) - } - - result.mainnetKeys = append(result.mainnetKeys, result.sidechainKeys...) 
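// Editorial note: at this point the mainnet list is the full sidechain list
// plus one fresh key, so newAlphabetList may rotate in at most one new member
// (its per-call limit is (n-1)/3 = 1 for n = 4 sidechain keys).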
- pk, err := keys.NewPrivateKey() - require.NoError(t, err, "failed to create private key") - result.mainnetKeys = append(result.mainnetKeys, pk.PublicKey()) - - result.newAlphabetExp, err = newAlphabetList(result.sidechainKeys, result.mainnetKeys) - require.NoError(t, err, "failed to create expected new alphabet") - - if len(result.newAlphabetExp) == 0 { - continue // can be happen because of random and sort - } - - var irKeys keys.PublicKeys - irKeys = append(irKeys, result.sidechainKeys...) - result.newInnerRingExp, err = updateInnerRing(irKeys, result.sidechainKeys, result.newAlphabetExp) - require.NoError(t, err, "failed to create expected new IR") - sort.Sort(result.newInnerRingExp) - - return result - } -} - -type testEpochState struct { - epoch uint64 -} - -func (s *testEpochState) EpochCounter() uint64 { - return s.epoch -} - -type testAlphabetState struct { - isAlphabet bool -} - -func (s *testAlphabetState) IsAlphabet(context.Context) bool { - return s.isAlphabet -} - -type testVoter struct { - votes []VoteValidatorPrm -} - -func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error { - v.votes = append(v.votes, prm) - return nil -} - -type testIRFetcher struct { - publicKeys keys.PublicKeys -} - -func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { - return f.publicKeys, nil -} - -type testMorphClient struct { - commiteeKeys keys.PublicKeys - - alphabetUpdates []client.UpdateAlphabetListPrm - notaryUpdates []client.UpdateNotaryListPrm -} - -func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) { - return c.commiteeKeys, nil -} - -func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error { - c.alphabetUpdates = append(c.alphabetUpdates, prm) - return nil -} - -func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error { - c.notaryUpdates = append(c.notaryUpdates, prm) - return nil -} - -type testMainnetClient struct { - alphabetKeys keys.PublicKeys - designateHash util.Uint160 -} - -func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) { - return c.alphabetKeys, nil -} - -func (c *testMainnetClient) GetDesignateHash() util.Uint160 { - return c.designateHash -} - -type testFrostFSClient struct { - updates []frostfscontract.AlphabetUpdatePrm -} - -func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error { - c.updates = append(c.updates, p) - return nil -} diff --git a/pkg/innerring/processors/governance/list.go b/pkg/innerring/processors/governance/list.go deleted file mode 100644 index 9f982f7c6..000000000 --- a/pkg/innerring/processors/governance/list.go +++ /dev/null @@ -1,110 +0,0 @@ -package governance - -import ( - "errors" - "fmt" - "sort" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -var ( - errNotEnoughKeys = errors.New("alphabet list in mainnet is too short") - errNotEqualLen = errors.New("old and new alphabet lists have different length") - errEmptySidechain = errors.New("sidechain list is empty") -) - -// newAlphabetList returns an updated list of sidechain keys with no more than 1\3 -// of new keys from the mainnet list. The function returns `errEmptySidechain` if -// the sidechain list is empty. The function returns `errNotEnoughKeys` if the mainnet -// list contains less keys than the sidechain list. The function returns (nil, nil) if -// the mainnet list contains all keys from the sidechain list. 
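// At most (len(sidechain)-1)/3 keys absent from the sidechain list are admitted
// per call (see newNodeLimit in the body), so a full rotation of the alphabet
// takes several sync rounds.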
-// -// Sorts passed slices. -func newAlphabetList(sidechain, mainnet keys.PublicKeys) (keys.PublicKeys, error) { - sort.Sort(sidechain) - sort.Sort(mainnet) - - ln := len(sidechain) - if ln == 0 { - return nil, errEmptySidechain - } - - if len(mainnet) < ln { - return nil, fmt.Errorf("%w: expecting %d keys", errNotEnoughKeys, ln) - } - - hmap := make(map[string]bool, ln) - result := make(keys.PublicKeys, 0, ln) - - for _, node := range sidechain { - hmap[node.Address()] = false - } - - newNodes := 0 - newNodeLimit := (ln - 1) / 3 - - for _, node := range mainnet { - if len(result) == ln { - break - } - - limitReached := newNodes == newNodeLimit - - mainnetAddr := node.Address() - if _, ok := hmap[mainnetAddr]; !ok { - if limitReached { - continue - } - newNodes++ - } else { - hmap[mainnetAddr] = true - } - - result = append(result, node) - } - - if newNodes == 0 { - return nil, nil - } - - for _, node := range sidechain { - if len(result) == ln { - break - } - - if !hmap[node.Address()] { - result = append(result, node) - } - } - - sort.Sort(result) - - return result, nil -} - -// updateInnerRing function removes `before` keys from `innerRing` and adds -// `after` keys in the list. If the length of `before` and `after` is not the same, -// the function returns errNotEqualLen. -func updateInnerRing(innerRing, before, after keys.PublicKeys) (keys.PublicKeys, error) { - lnBefore := len(before) - if lnBefore != len(after) { - return nil, errNotEqualLen - } - - result := make(keys.PublicKeys, 0, len(innerRing)) - - // O(n^2) for 7 nodes is not THAT bad. -loop: - for i := range innerRing { - for j := range before { - if innerRing[i].Equal(before[j]) { - result = append(result, after[j]) - continue loop - } - } - result = append(result, innerRing[i]) - } - - return result, nil -} diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go deleted file mode 100644 index 4ecebf05b..000000000 --- a/pkg/innerring/processors/governance/list_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package governance - -import ( - "sort" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestNewAlphabetList(t *testing.T) { - k, err := generateKeys(14) - require.NoError(t, err) - - orig := keys.PublicKeys{k[0], k[1], k[2], k[3], k[4], k[5], k[6]} - - t.Run("no sidechain keys", func(t *testing.T) { - _, err := newAlphabetList(nil, orig) - require.ErrorIs(t, err, errEmptySidechain) - }) - - t.Run("same keys", func(t *testing.T) { - list, err := newAlphabetList(orig, orig) - require.NoError(t, err) - require.Nil(t, list) - }) - - t.Run("not enough mainnet keys", func(t *testing.T) { - _, err := newAlphabetList(orig, orig[:len(orig)-1]) - require.ErrorIs(t, err, errNotEnoughKeys) - }) - - t.Run("less than third new keys", func(t *testing.T) { - exp := keys.PublicKeys{k[1], k[2], k[3], k[4], k[5], k[6], k[7]} - got, err := newAlphabetList(orig, exp) - require.NoError(t, err) - require.True(t, equalPublicKeyLists(exp, got)) - }) - - t.Run("completely new list of keys", func(t *testing.T) { - list := orig - exp := keys.PublicKeys{k[7], k[8], k[9], k[10], k[11], k[12], k[13]} - - rounds := []keys.PublicKeys{ - {k[0], k[1], k[2], k[3], k[4], k[7], k[8]}, - {k[0], k[1], k[2], k[7], k[8], k[9], k[10]}, - {k[0], k[7], k[8], k[9], k[10], k[11], k[12]}, - exp, - } - ln := len(rounds) - - for i := range ln { - list, err = newAlphabetList(list, exp) - require.NoError(t, err) - require.True(t, equalPublicKeyLists(list, 
rounds[i])) - } - }) - - t.Run("unsorted keys", func(t *testing.T) { - orig := keys.PublicKeys{k[1], k[2], k[3], k[4]} - main := keys.PublicKeys{k[1], k[2], k[5], k[4]} - - exp := main.Copy() - sort.Sort(exp) - - got, err := newAlphabetList(orig, main) - require.NoError(t, err) - require.True(t, equalPublicKeyLists(exp, got)) // expect {1, 2, 4, 5}, not {1, 2, 3, 5} - }) - - t.Run("new keys in the middle", func(t *testing.T) { - orig := keys.PublicKeys{k[0], k[1], k[2], k[6], k[7], k[8], k[9]} - // `exp` should contain maximum amount of new keys (2) in the middle - exp := keys.PublicKeys{k[0], k[3], k[4], k[6], k[7], k[8], k[9]} - got, err := newAlphabetList(orig, exp) - require.NoError(t, err) - require.True(t, equalPublicKeyLists(exp, got)) - }) -} - -func TestUpdateInnerRing(t *testing.T) { - k, err := generateKeys(6) - require.NoError(t, err) - - t.Run("same keys", func(t *testing.T) { - ir := k[:3] - before := k[1:3] - after := keys.PublicKeys{k[2], k[1]} - - list, err := updateInnerRing(ir, before, after) - require.NoError(t, err) - - sort.Sort(ir) - sort.Sort(list) - require.True(t, equalPublicKeyLists(ir, list)) - }) - - t.Run("unknown keys", func(t *testing.T) { - ir := k[:3] - before := k[3:4] - after := k[4:5] - - list, err := updateInnerRing(ir, before, after) - require.NoError(t, err) - - require.True(t, equalPublicKeyLists(ir, list)) - }) - - t.Run("different size", func(t *testing.T) { - ir := k[:3] - before := k[1:3] - after := k[4:5] - - _, err = updateInnerRing(ir, before, after) - require.ErrorIs(t, err, errNotEqualLen) - }) - - t.Run("new list", func(t *testing.T) { - ir := k[:3] - before := k[1:3] - after := k[4:6] - exp := keys.PublicKeys{k[0], k[4], k[5]} - - list, err := updateInnerRing(ir, before, after) - require.NoError(t, err) - - require.True(t, equalPublicKeyLists(exp, list)) - }) -} - -func generateKeys(n int) (keys.PublicKeys, error) { - pubKeys := make(keys.PublicKeys, 0, n) - - for range n { - privKey, err := keys.NewPrivateKey() - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, privKey.PublicKey()) - } - - sort.Sort(pubKeys) - - return pubKeys, nil -} - -func equalPublicKeyLists(a, b keys.PublicKeys) bool { - if len(a) != len(b) { - return false - } - - for i, node := range a { - if !b[i].Equal(node) { - return false - } - } - - return true -} diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go deleted file mode 100644 index 6e22abb3c..000000000 --- a/pkg/innerring/processors/governance/process_update.go +++ /dev/null @@ -1,160 +0,0 @@ -package governance - -import ( - "context" - "encoding/binary" - "encoding/hex" - "sort" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -const ( - alphabetUpdateIDPrefix = "AlphabetUpdate" -) - -func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool { - if !gp.alphabetState.IsAlphabet(ctx) { - gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) - return true - } - - mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, - zap.Error(err)) - return false - } - - sidechainAlphabet, err := 
gp.morphClient.Committee() - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain, - zap.Error(err)) - return false - } - - newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, - zap.Error(err)) - return false - } - - if newAlphabet == nil { - gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) - return true - } - - gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, - zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), - zap.String("new_alphabet", prettyKeys(newAlphabet)), - ) - - votePrm := VoteValidatorPrm{ - Validators: newAlphabet, - Hash: &txHash, - } - - // 1. Vote to sidechain committee via alphabet contracts. - err = gp.voter.VoteForSidechainValidator(ctx, votePrm) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee, - zap.Error(err)) - } - - // 2. Update NeoFSAlphabet role in the sidechain. - gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash) - - // 3. Update notary role in the sidechain. - gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash) - - // 4. Update FrostFS contract in the mainnet. - gp.updateFrostFSContractInMainnet(ctx, newAlphabet) - - gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate) - - return true -} - -func prettyKeys(keys keys.PublicKeys) string { - const delimiter = "," - - sb := strings.Builder{} - for _, key := range keys { - sb.WriteString(hex.EncodeToString(key.Bytes())) - sb.WriteString(delimiter) - } - - return strings.TrimRight(sb.String(), delimiter) -} - -func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { - innerRing, err := gp.irFetcher.InnerRingKeys(ctx) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, - zap.Error(err)) - return - } - - newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, - zap.Error(err)) - return - } - - sort.Sort(newInnerRing) - - gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList, - zap.String("before", prettyKeys(innerRing)), - zap.String("after", prettyKeys(newInnerRing)), - ) - - updPrm := client.UpdateAlphabetListPrm{} - updPrm.SetList(newInnerRing) - updPrm.SetHash(txHash) - - if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { - gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, - zap.Error(err)) - } -} - -func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) { - updPrm := client.UpdateNotaryListPrm{} - - updPrm.SetList(newAlphabet) - updPrm.SetHash(txHash) - - err := gp.morphClient.UpdateNotaryList(ctx, updPrm) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, - zap.Error(err)) - } -} - -func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) { - epoch := gp.epochState.EpochCounter() - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, epoch) - - id := append([]byte(alphabetUpdateIDPrefix), buf...) 
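// Editorial note on the id layout: the ASCII prefix "AlphabetUpdate" followed
// by the epoch as 8 little-endian bytes, i.e. one update ID per epoch.
// (Whether the contract deduplicates by this ID is an assumption, not shown here.)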
- - prm := frostfscontract.AlphabetUpdatePrm{} - - prm.SetID(id) - prm.SetPubs(newAlphabet) - - err := gp.frostfsClient.AlphabetUpdate(ctx, prm) - if err != nil { - gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, - zap.Error(err)) - } -} diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go deleted file mode 100644 index 2d131edda..000000000 --- a/pkg/innerring/processors/governance/processor.go +++ /dev/null @@ -1,178 +0,0 @@ -package governance - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/nspcc-dev/neo-go/pkg/core/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" -) - -// ProcessorPoolSize limits the pool size for governance Processor. Processor manages -// governance sync tasks. This process must not be interrupted by other sync -// operation, so we limit the pool size for the processor to one. -const ProcessorPoolSize = 1 - -type ( - // AlphabetState is a callback interface for innerring global state. - AlphabetState interface { - IsAlphabet(context.Context) bool - } -) - -// VoteValidatorPrm groups parameters of the VoteForSidechainValidator -// operation. -type VoteValidatorPrm struct { - Validators keys.PublicKeys - Hash *util.Uint256 // hash of the transaction that triggered voting -} - -// Voter is a callback interface for alphabet contract voting. -type Voter interface { - VoteForSidechainValidator(context.Context, VoteValidatorPrm) error -} - -type ( - // EpochState is a callback interface for innerring global state. - EpochState interface { - EpochCounter() uint64 - } - - // IRFetcher is a callback interface for innerring keys. - // Implementation must take into account availability of - // the notary contract. - IRFetcher interface { - InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) - } - - FrostFSClient interface { - AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error - } - - NetmapClient interface { - UpdateInnerRing(p nmClient.UpdateIRPrm) error - } - - MainnetClient interface { - NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) - GetDesignateHash() util.Uint160 - } - - MorphClient interface { - Committee() (res keys.PublicKeys, err error) - UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error - UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error - } - - // Processor of events related to governance in the network. - Processor struct { - log *logger.Logger - metrics metrics.Register - pool *ants.Pool - frostfsClient FrostFSClient - - alphabetState AlphabetState - epochState EpochState - voter Voter - irFetcher IRFetcher - - mainnetClient MainnetClient - morphClient MorphClient - - designate util.Uint160 - } - - // Params of the processor constructor. 
- Params struct { - Log *logger.Logger - Metrics metrics.Register - - AlphabetState AlphabetState - EpochState EpochState - Voter Voter - IRFetcher IRFetcher - - MorphClient MorphClient - MainnetClient MainnetClient - FrostFSClient FrostFSClient - } -) - -// New creates a balance contract processor instance. -func New(p *Params) (*Processor, error) { - switch { - case p.Log == nil: - return nil, errors.New("ir/governance: logger is not set") - case p.MainnetClient == nil: - return nil, errors.New("ir/governance: neo:mainnet client is not set") - case p.MorphClient == nil: - return nil, errors.New("ir/governance: neo:sidechain client is not set") - case p.AlphabetState == nil: - return nil, errors.New("ir/governance: global state is not set") - case p.EpochState == nil: - return nil, errors.New("ir/governance: global state is not set") - case p.Voter == nil: - return nil, errors.New("ir/governance: global state is not set") - case p.IRFetcher == nil: - return nil, errors.New("ir/governance: innerring keys fetcher is not set") - } - - pool, err := ants.NewPool(ProcessorPoolSize, ants.WithNonblocking(true)) - if err != nil { - return nil, fmt.Errorf("ir/governance: can't create worker pool: %w", err) - } - - metricsRegister := p.Metrics - if metricsRegister == nil { - metricsRegister = metrics.DefaultRegister{} - } - - // result is cached by neo-go, so we can pre-calc it - designate := p.MainnetClient.GetDesignateHash() - - return &Processor{ - log: p.Log, - metrics: metricsRegister, - pool: pool, - frostfsClient: p.FrostFSClient, - alphabetState: p.AlphabetState, - epochState: p.EpochState, - voter: p.Voter, - irFetcher: p.IRFetcher, - mainnetClient: p.MainnetClient, - morphClient: p.MorphClient, - designate: designate, - }, nil -} - -// ListenerNotificationHandlers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - return []event.NotificationHandlerInfo{ - { - Contract: gp.designate, - Type: event.TypeFromString(native.DesignationEventName), - Parser: rolemanagement.ParseDesignate, - Handlers: []event.Handler{gp.HandleAlphabetSync}, - }, - } -} - -// ListenerNotaryParsers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { - return nil -} - -// ListenerNotaryHandlers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go deleted file mode 100644 index abd5b089a..000000000 --- a/pkg/innerring/processors/netmap/cleanup_table.go +++ /dev/null @@ -1,119 +0,0 @@ -package netmap - -import ( - "bytes" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -type ( - cleanupTable struct { - sync.RWMutex - enabled bool - threshold uint64 - lastAccess map[string]epochStampWithNodeInfo - } - - epochStamp struct { - epoch uint64 - removeFlag bool - } - - epochStampWithNodeInfo struct { - epochStamp - - binNodeInfo []byte - - maintenance bool - } -) - -func newCleanupTable(enabled bool, threshold uint64) cleanupTable { - return cleanupTable{ - enabled: enabled, - threshold: threshold, - lastAccess: make(map[string]epochStampWithNodeInfo), - } -} - -// Update cleanup table based on on-chain information about netmap. 
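The cleanupTable above drives offline-node eviction: update (below) refreshes the table from each on-chain netmap snapshot, touch records that a node announced itself, flag marks a key for forced re-announcement, and forEachRemoveCandidate visits every non-maintenance node that has been silent for more than threshold epochs. A hypothetical usage sketch tying the calls together; snapshot, epoch, key and binNodeInfo are placeholders:

ct := newCleanupTable(true, 3) // enabled, threshold of 3 epochs
ct.update(snapshot, epoch)     // refresh from the latest netmap snapshot

if ct.touch(key, epoch, binNodeInfo) {
	// The node is new, was flagged, or changed its info:
	// its announcement should be relayed on chain.
}

_ = ct.forEachRemoveCandidate(epoch, func(k string) error {
	// k has been silent for more than 3 epochs; the netmap processor
	// reacts by invoking updateStateIR(Offline, k), as the tests below show.
	return nil
})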
-func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) { - c.Lock() - defer c.Unlock() - - nmNodes := snapshot.Nodes() - - // replacing map is less memory efficient but faster - newMap := make(map[string]epochStampWithNodeInfo, len(nmNodes)) - - for i := range nmNodes { - binNodeInfo := nmNodes[i].Marshal() - - keyString := netmap.StringifyPublicKey(nmNodes[i]) - - access, ok := c.lastAccess[keyString] - if ok { - access.removeFlag = false // reset remove Flag on each Update - } else { - access.epoch = now - } - - access.binNodeInfo = binNodeInfo - access.maintenance = nmNodes[i].Status().IsMaintenance() - - newMap[keyString] = access - } - - c.lastAccess = newMap -} - -// updates last access time of the netmap node by string public key. -// -// Returns true if at least one condition is met: -// - node hasn't been accessed yet; -// - remove flag is set; -// - binary node info has changed. -func (c *cleanupTable) touch(keyString string, now uint64, binNodeInfo []byte) bool { - c.Lock() - defer c.Unlock() - - access, ok := c.lastAccess[keyString] - result := !ok || access.removeFlag || !bytes.Equal(access.binNodeInfo, binNodeInfo) - - access.removeFlag = false // reset remove flag on each touch - access.epoch = max(access.epoch, now) - access.binNodeInfo = binNodeInfo // update binary node info - - c.lastAccess[keyString] = access - - return result -} - -func (c *cleanupTable) flag(keyString string) { - c.Lock() - defer c.Unlock() - - if access, ok := c.lastAccess[keyString]; ok { - access.removeFlag = true - c.lastAccess[keyString] = access - } -} - -func (c *cleanupTable) forEachRemoveCandidate(epoch uint64, f func(string) error) error { - c.Lock() - defer c.Unlock() - - for keyString, access := range c.lastAccess { - if !access.maintenance && epoch-access.epoch > c.threshold { - access.removeFlag = true // set remove flag - c.lastAccess[keyString] = access - - if err := f(keyString); err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go deleted file mode 100644 index 208bd5496..000000000 --- a/pkg/innerring/processors/netmap/cleanup_table_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package netmap - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func genKey(t *testing.T) *keys.PrivateKey { - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - return priv -} - -func TestCleanupTable(t *testing.T) { - infos := []netmap.NodeInfo{ - newNodeInfo(genKey(t).PublicKey()), - newNodeInfo(genKey(t).PublicKey()), - newNodeInfo(genKey(t).PublicKey()), - } - - var networkMap netmap.NetMap - networkMap.SetNodes(infos) - - mapInfos := make(map[string][]byte) - - for i := range infos { - binNodeInfo := infos[i].Marshal() - - mapInfos[netmap.StringifyPublicKey(infos[i])] = binNodeInfo - } - - t.Run("update", func(t *testing.T) { - c := newCleanupTable(true, 1) - c.update(networkMap, 1) - require.Len(t, c.lastAccess, len(infos)) - - for k, v := range c.lastAccess { - require.EqualValues(t, 1, v.epoch) - require.False(t, v.removeFlag) - - _, ok := mapInfos[k] - require.True(t, ok) - } - - t.Run("update with flagged", func(t *testing.T) { - key := netmap.StringifyPublicKey(infos[0]) - c.flag(key) - - c.update(networkMap, 2) - require.EqualValues(t, 1, c.lastAccess[key].epoch) - require.False(t, c.lastAccess[key].removeFlag) - }) - }) - - 
t.Run("touch", func(t *testing.T) { - c := newCleanupTable(true, 1) - c.update(networkMap, 1) - - key := netmap.StringifyPublicKey(infos[1]) - require.False(t, c.touch(key, 11, mapInfos[key])) - require.EqualValues(t, 11, c.lastAccess[key].epoch) - - updNodeInfo := []byte("changed node info") - - require.True(t, c.touch(key, 11, updNodeInfo)) - require.EqualValues(t, 11, c.lastAccess[key].epoch) - - require.True(t, c.touch(key+"x", 12, updNodeInfo)) - require.EqualValues(t, 12, c.lastAccess[key+"x"].epoch) - }) - - t.Run("flag", func(t *testing.T) { - c := newCleanupTable(true, 1) - c.update(networkMap, 1) - - key := netmap.StringifyPublicKey(infos[1]) - c.flag(key) - require.True(t, c.lastAccess[key].removeFlag) - - require.True(t, c.touch(key, 2, mapInfos[key])) - require.False(t, c.lastAccess[key].removeFlag) - }) - - t.Run("iterator", func(t *testing.T) { - c := newCleanupTable(true, 2) - c.update(networkMap, 1) - - t.Run("no nodes to remove", func(t *testing.T) { - cnt := 0 - require.NoError(t, - c.forEachRemoveCandidate(2, func(_ string) error { - cnt++ - return nil - })) - require.EqualValues(t, 0, cnt) - }) - - t.Run("all nodes to remove", func(t *testing.T) { - cnt := 0 - require.NoError(t, - c.forEachRemoveCandidate(4, func(s string) error { - cnt++ - _, ok := mapInfos[s] - require.True(t, ok) - return nil - })) - require.EqualValues(t, len(infos), cnt) - }) - - t.Run("some nodes to remove", func(t *testing.T) { - cnt := 0 - key := netmap.StringifyPublicKey(infos[1]) - - require.True(t, c.touch(key, 4, mapInfos[key])) // one node was updated - - require.NoError(t, - c.forEachRemoveCandidate(4, func(s string) error { - cnt++ - require.NotEqual(t, s, key) - return nil - })) - require.EqualValues(t, len(infos)-1, cnt) - }) - - t.Run("skip maintenance nodes", func(t *testing.T) { - cnt := 0 - infos[1].SetStatus(netmap.Maintenance) - key := netmap.StringifyPublicKey(infos[1]) - c.update(networkMap, 5) - - require.NoError(t, - c.forEachRemoveCandidate(5, func(s string) error { - cnt++ - require.NotEqual(t, s, key) - return nil - })) - require.EqualValues(t, len(infos)-1, cnt) - }) - }) -} - -func newNodeInfo(key *keys.PublicKey) (n netmap.NodeInfo) { - n.SetPublicKey(key.Bytes()) - return n -} diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go deleted file mode 100644 index 4c7199a49..000000000 --- a/pkg/innerring/processors/netmap/handlers.go +++ /dev/null @@ -1,104 +0,0 @@ -package netmap - -import ( - "context" - "encoding/hex" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" - timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "go.uber.org/zap" -) - -func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) { - _ = ev.(timerEvent.NewEpochTick) - np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch")) - - // send an event to the worker pool - - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, - zap.Int("capacity", np.pool.Cap())) - } -} - -func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) { - epochEvent := 
ev.(netmapEvent.NewEpoch) - np.log.Info(ctx, logs.Notification, - zap.String("type", "new epoch"), - zap.Uint64("value", epochEvent.EpochNumber())) - - // send an event to the worker pool - - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool { - return np.processNewEpoch(ctx, epochEvent) - }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, - zap.Int("capacity", np.pool.Cap())) - } -} - -func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) { - newPeer := ev.(netmapEvent.AddPeer) - - np.log.Info(ctx, logs.Notification, - zap.String("type", "add peer"), - ) - - // send an event to the worker pool - - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool { - return np.processAddPeer(ctx, newPeer) - }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, - zap.Int("capacity", np.pool.Cap())) - } -} - -func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) { - updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info(ctx, logs.Notification, - zap.String("type", "update peer state"), - zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) - - // send event to the worker pool - - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool { - return np.processUpdatePeer(ctx, updPeer) - }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, - zap.Int("capacity", np.pool.Cap())) - } -} - -func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) { - if !np.netmapSnapshot.enabled { - np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518) - - return - } - - cleanup := ev.(netmapCleanupTick) - - np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner")) - - // send event to the worker pool - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { - return np.processNetmapCleanupTick(ctx, cleanup) - }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, - zap.Int("capacity", np.pool.Cap())) - } -} diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go deleted file mode 100644 index 934c3790d..000000000 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ /dev/null @@ -1,419 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - "testing" - "time" - - netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/network/payload" 
- "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -func TestNewEpochTick(t *testing.T) { - t.Parallel() - es := &testEpochState{ - counter: 100, - } - nc := &testNetmapClient{} - - proc, err := newTestProc(t, func(p *Params) { - p.CleanupEnabled = true - p.EpochState = es - p.NetmapClient = nc - }) - - require.NoError(t, err, "failed to create processor") - - ev := timerEvent.NewEpochTick{} - proc.HandleNewEpochTick(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.EqualValues(t, []uint64{101}, nc.newEpochs, "invalid epochs") -} - -func TestNewEpoch(t *testing.T) { - t.Parallel() - var node1 netmap.NodeInfo - key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") - require.NoError(t, err, "failed to parse key1") - node1.SetPublicKey(key1.Bytes()) - - var node2 netmap.NodeInfo - key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3") - require.NoError(t, err, "failed to parse key2") - node2.SetPublicKey(key2.Bytes()) - - network := &netmap.NetMap{} - network.SetNodes([]netmap.NodeInfo{node1, node2}) - - es := &testEpochState{ - counter: 100, - duration: 10, - } - r := &testEpochResetter{} - nc := &testNetmapClient{ - epochDuration: 20, - txHeights: map[util.Uint256]uint32{ - {101}: 10_000, - }, - netmap: network, - } - eh := &testEventHandler{} - - proc, err := newTestProc(t, func(p *Params) { - p.NotaryDepositHandler = eh.Handle - p.AlphabetSyncHandler = eh.Handle - p.NetmapClient = nc - p.EpochTimer = r - p.EpochState = es - }) - - require.NoError(t, err, "failed to create processor") - - ev := netmapEvent.NewEpoch{ - Num: 101, - Hash: util.Uint256{101}, - } - proc.handleNewEpoch(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.Equal(t, nc.epochDuration, es.duration, "invalid epoch duration") - require.Equal(t, ev.Num, es.counter, "invalid epoch counter") - require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets") - - require.EqualValues(t, []event.Event{ - governance.NewSyncEvent(ev.TxHash()), - ev, - }, eh.handledEvents, "invalid handled events") -} - -func TestAddPeer(t *testing.T) { - t.Parallel() - - nc := &testNetmapClient{ - contractAddress: util.Uint160{47}, - } - - proc, err := newTestProc(t, func(p *Params) { - p.NetmapClient = nc - }) - - require.NoError(t, err, "failed to create processor") - - var node netmap.NodeInfo - key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") - require.NoError(t, err, "failed to parse key") - node.SetPublicKey(key.Bytes()) - - ev := netmapEvent.AddPeer{ - NodeBytes: node.Marshal(), - Request: &payload.P2PNotaryRequest{ - MainTransaction: &transaction.Transaction{}, - }, - } - proc.handleAddPeer(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.Nil(t, nc.notaryInvokes, "invalid notary invokes") - - node.SetStatus(netmap.Online) - ev = netmapEvent.AddPeer{ - NodeBytes: node.Marshal(), - Request: &payload.P2PNotaryRequest{ - MainTransaction: &transaction.Transaction{}, - }, - } - proc.handleAddPeer(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.EqualValues(t, []notaryInvoke{ - { - contract: nc.contractAddress, - fee: 0, - nonce: 
ev.NotaryRequest().MainTransaction.Nonce, - vub: nil, - method: "addPeerIR", - args: []any{node.Marshal()}, - }, - }, nc.notaryInvokes, "invalid notary invokes") -} - -func TestUpdateState(t *testing.T) { - t.Parallel() - - ns := &testNodeStateSettings{ - maintAllowed: true, - } - nc := &testNetmapClient{} - - proc, err := newTestProc(t, func(p *Params) { - p.NetmapClient = nc - p.NodeStateSettings = ns - }) - - require.NoError(t, err, "failed to create processor") - - key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") - require.NoError(t, err, "failed to parse key") - - ev := netmapEvent.UpdatePeer{ - State: netmapContract.NodeStateOnline, - PubKey: key, - Request: &payload.P2PNotaryRequest{ - MainTransaction: &transaction.Transaction{}, - }, - } - proc.handleUpdateState(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - require.EqualValues(t, []*transaction.Transaction{ev.Request.MainTransaction}, nc.invokedTxs, "invalid transactions") -} - -func TestCleanupTick(t *testing.T) { - t.Parallel() - - nc := &testNetmapClient{ - contractAddress: util.Uint160{111}, - } - proc, err := newTestProc(t, - func(p *Params) { - p.NetmapClient = nc - p.CleanupEnabled = true - }, - ) - - require.NoError(t, err, "failed to create processor") - - key1Str := "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35" - proc.netmapSnapshot.lastAccess[key1Str] = epochStampWithNodeInfo{ - epochStamp: epochStamp{ - epoch: 95, - removeFlag: false, - }, - } - key2Str := "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3" - proc.netmapSnapshot.lastAccess[key2Str] = epochStampWithNodeInfo{ - epochStamp: epochStamp{ - epoch: 98, - removeFlag: false, - }, - } - - ev := netmapCleanupTick{ - epoch: 100, - txHash: util.Uint256{123}, - } - - proc.handleCleanupTick(context.Background(), ev) - - for proc.pool.Running() > 0 { - time.Sleep(10 * time.Millisecond) - } - - keyExp, err := keys.NewPublicKeyFromString(key1Str) - require.NoError(t, err, "failed to parse expired key") - - updExp := netmapclient.UpdatePeerPrm{} - updExp.SetKey(keyExp.Bytes()) - updExp.SetHash(ev.TxHash()) - - require.EqualValues(t, []notaryInvoke{ - { - contract: nc.contractAddress, - fee: 0, - nonce: uint32(ev.epoch), - vub: nil, - method: "updateStateIR", - args: []any{int64(v2netmap.Offline), keyExp.Bytes()}, - }, - }, nc.notaryInvokes, "invalid notary invokes") - require.True(t, proc.netmapSnapshot.lastAccess[key1Str].removeFlag, "invalid expired removed flag") - require.False(t, proc.netmapSnapshot.lastAccess[key2Str].removeFlag, "invalid non expired removed flag") -} - -func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { - ns := &testNodeStateSettings{} - es := &testEpochState{} - r := &testEpochResetter{} - as := &testAlphabetState{ - isAlphabet: true, - } - nc := &testNetmapClient{} - eh := &testEventHandler{} - - p := &Params{ - Log: test.NewLogger(t), - PoolSize: 1, - CleanupEnabled: false, - CleanupThreshold: 3, - NodeStateSettings: ns, - NodeValidator: &testValidator{}, - EpochState: es, - EpochTimer: r, - AlphabetState: as, - NetmapClient: nc, - NotaryDepositHandler: eh.Handle, - AlphabetSyncHandler: eh.Handle, - } - - nonDefault(p) - - return New(p) -} - -type testNodeStateSettings struct { - maintAllowed bool -} - -func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { - if s.maintAllowed { - return nil - } - return fmt.Errorf("maintenance mode 
not allowed") -} - -type testValidator struct{} - -func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error { - return nil -} - -type testEpochState struct { - counter uint64 - duration uint64 -} - -func (s *testEpochState) SetEpochCounter(c uint64) { - s.counter = c -} - -func (s *testEpochState) EpochCounter() uint64 { - return s.counter -} - -func (s *testEpochState) SetEpochDuration(d uint64) { - s.duration = d -} - -func (s *testEpochState) EpochDuration() uint64 { - return s.duration -} - -type testEpochResetter struct { - timers []uint32 -} - -func (r *testEpochResetter) ResetEpochTimer(t uint32) error { - r.timers = append(r.timers, t) - return nil -} - -type testAlphabetState struct { - isAlphabet bool -} - -func (s *testAlphabetState) IsAlphabet(context.Context) bool { - return s.isAlphabet -} - -type notaryInvoke struct { - contract util.Uint160 - fee fixedn.Fixed8 - nonce uint32 - vub *uint32 - method string - args []any -} - -type testNetmapClient struct { - contractAddress util.Uint160 - epochDuration uint64 - netmap *netmap.NetMap - txHeights map[util.Uint256]uint32 - - notaryInvokes []notaryInvoke - newEpochs []uint64 - invokedTxs []*transaction.Transaction -} - -func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { - c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{ - contract: contract, - fee: fee, - nonce: nonce, - vub: vub, - method: method, - args: args, - }) - return nil -} - -func (c *testNetmapClient) ContractAddress() util.Uint160 { - return c.contractAddress -} - -func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) { - return c.epochDuration, nil -} - -func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { - if res, found := c.txHeights[h]; found { - return res, nil - } - return 0, fmt.Errorf("not found") -} - -func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { - return c.netmap, nil -} - -func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error { - c.newEpochs = append(c.newEpochs, epoch) - return nil -} - -func (c *testNetmapClient) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { - return true, nil -} - -func (c *testNetmapClient) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { - c.invokedTxs = append(c.invokedTxs, mainTx) - return nil -} - -type testEventHandler struct { - handledEvents []event.Event -} - -func (h *testEventHandler) Handle(_ context.Context, e event.Event) { - h.handledEvents = append(h.handledEvents, e) -} diff --git a/pkg/innerring/processors/netmap/internal_events.go b/pkg/innerring/processors/netmap/internal_events.go deleted file mode 100644 index 7ff6b40d5..000000000 --- a/pkg/innerring/processors/netmap/internal_events.go +++ /dev/null @@ -1,22 +0,0 @@ -package netmap - -import "github.com/nspcc-dev/neo-go/pkg/util" - -// netmapCleanupTick is a event to remove offline nodes. -type netmapCleanupTick struct { - epoch uint64 - - // txHash is used in notary environment - // for calculating unique but same for - // all notification receivers values. - txHash util.Uint256 -} - -// TxHash returns the hash of the TX that triggers -// synchronization process. -func (s netmapCleanupTick) TxHash() util.Uint256 { - return s.txHash -} - -// MorphEvent implements the Event interface. 
-func (netmapCleanupTick) MorphEvent() {} diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go deleted file mode 100644 index b81dc9989..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go +++ /dev/null @@ -1,93 +0,0 @@ -package locode - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -var errMissingRequiredAttr = errors.New("missing required attribute in DB record") - -// VerifyAndUpdate validates UN-LOCODE attribute of n -// and adds a group of related attributes. -// -// If n contains at least one of the LOCODE-derived attributes, -// an error is returned. -// -// If n contains UN-LOCODE attribute and its value does not -// match the UN/LOCODE format, an error is returned. -// -// New attributes are formed from the record of DB instance (Prm). -// If DB entry R was found w/o errors, new attributes are: -// - CountryCode: R.CountryCode().String(); -// - Country: R.CountryName(); -// - Location: Record.LocationName(); -// - SubDivCode: R.SubDivCode(); -// - SubDiv: R.SubDivName(); -// - Continent: R.Continent().String(). -// -// UN-LOCODE attribute remains untouched. -func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { - attrLocode := n.LOCODE() - if attrLocode == "" { - return nil - } - - lc, err := locode.FromString(attrLocode) - if err != nil { - return fmt.Errorf("invalid locode value: %w", err) - } - - record, err := v.db.Get(lc) - if err != nil { - return fmt.Errorf("could not get locode record from DB: %w", err) - } - - countryCode := record.CountryCode() - if countryCode == nil { - return errMissingRequiredAttr - } - - strCountryCode := countryCode.String() - if strCountryCode == "" { - return errMissingRequiredAttr - } - - countryName := record.CountryName() - if countryName == "" { - return errMissingRequiredAttr - } - - locationName := record.LocationName() - if locationName == "" { - return errMissingRequiredAttr - } - - continent := record.Continent() - if continent == nil { - return errMissingRequiredAttr - } - - continentName := continent.String() - if continentName == "" { - return errMissingRequiredAttr - } - - n.SetCountryCode(strCountryCode) - n.SetCountryName(countryName) - n.SetLocationName(locationName) - n.SetContinentName(continentName) - - if subDivCode := record.SubDivCode(); subDivCode != "" { - n.SetSubdivisionCode(subDivCode) - } - - if subDivName := record.SubDivName(); subDivName != "" { - n.SetSubdivisionName(subDivName) - } - - return nil -} diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go deleted file mode 100644 index fa2dd1ac1..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package locode_test - -import ( - "context" - "errors" - "fmt" - "testing" - - locodestd "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/stretchr/testify/require" -) - -type record struct { - *locodedb.Key - *locodedb.Record -} - -type db struct { - items map[locodestd.LOCODE]locode.Record -} - -func 
(x db) add(lc locodestd.LOCODE, rec locode.Record) { - x.items[lc] = rec -} - -func (x db) Get(lc *locodestd.LOCODE) (locode.Record, error) { - r, ok := x.items[*lc] - if !ok { - return nil, errors.New("record not found") - } - - return r, nil -} - -func addLocodeAttrValue(n *netmap.NodeInfo, val string) { - n.SetLOCODE(val) -} - -func addLocodeAttr(n *netmap.NodeInfo, lc locodestd.LOCODE) { - n.SetLOCODE(fmt.Sprintf("%s %s", lc[0], lc[1])) -} - -func nodeInfoWithSomeAttrs() *netmap.NodeInfo { - var n netmap.NodeInfo - - n.SetAttribute("key1", "val1") - n.SetAttribute("key2", "val2") - - return &n -} - -func TestValidator_VerifyAndUpdate(t *testing.T) { - db := &db{ - items: make(map[locodestd.LOCODE]locode.Record), - } - - // test record with valid but random values - r := locodestd.Record{ - LOCODE: locodestd.LOCODE{"RU", "MOW"}, - NameWoDiacritics: "Moskva", - SubDiv: "MSK", - } - - k, err := locodedb.NewKey(r.LOCODE) - require.NoError(t, err) - - rdb, err := locodedb.NewRecord(r) - require.NoError(t, err) - - rdb.SetCountryName("Russia") - rdb.SetSubDivName("Moskva oblast") - - var cont locodedb.Continent = locodedb.ContinentEurope - - rdb.SetContinent(&cont) - - rec := record{ - Key: k, - Record: rdb, - } - - db.add(r.LOCODE, rec) - - var p locode.Prm - - p.DB = db - - validator := locode.New(p) - - t.Run("w/o locode", func(t *testing.T) { - n := nodeInfoWithSomeAttrs() - - err := validator.VerifyAndUpdate(context.Background(), n) - require.NoError(t, err) - }) - - t.Run("w/ locode", func(t *testing.T) { - t.Run("invalid locode", func(t *testing.T) { - n := nodeInfoWithSomeAttrs() - - addLocodeAttrValue(n, "WRONG LOCODE") - - err := validator.VerifyAndUpdate(context.Background(), n) - require.Error(t, err) - }) - - t.Run("missing DB record", func(t *testing.T) { - n := nodeInfoWithSomeAttrs() - - addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"}) - - err := validator.VerifyAndUpdate(context.Background(), n) - require.Error(t, err) - }) - - n := nodeInfoWithSomeAttrs() - - addLocodeAttr(n, r.LOCODE) - - err := validator.VerifyAndUpdate(context.Background(), n) - require.NoError(t, err) - - require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode")) - require.Equal(t, rec.CountryName(), n.Attribute("Country")) - require.Equal(t, rec.LocationName(), n.Attribute("Location")) - require.Equal(t, rec.SubDivCode(), n.Attribute("SubDivCode")) - require.Equal(t, rec.SubDivName(), n.Attribute("SubDiv")) - require.Equal(t, rec.Continent().String(), n.Attribute("Continent")) - }) -} diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go deleted file mode 100644 index ba5db9205..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go +++ /dev/null @@ -1,59 +0,0 @@ -package locode - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" -) - -// Record is an interface of read-only -// FrostFS LOCODE database single entry. -type Record interface { - // CountryCode must return ISO 3166-1 alpha-2 - // country code. - // - // Must not return nil. - CountryCode() *locodedb.CountryCode - - // CountryName must return English short country name - // officially used by the ISO 3166 - // Maintenance Agency (ISO 3166/MA). - CountryName() string - - // LocationCode must return UN/LOCODE 3-character code - // for the location (numerals 2-9 may also - // be used). - // - // Must not return nil. 
-	LocationCode() *locodedb.LocationCode
-
-	// LocationName must return the name of the location which
-	// has been allocated a UN/LOCODE, without
-	// diacritic signs.
-	LocationName() string
-
-	// SubDivCode must return ISO 1-3 character alphabetic
-	// and/or numeric code for the administrative
-	// division of the country concerned.
-	SubDivCode() string
-
-	// SubDivName must return subdivision name.
-	SubDivName() string
-
-	// Continent must return the continent where
-	// the location is situated.
-	//
-	// Must not return nil.
-	Continent() *locodedb.Continent
-}
-
-// DB is an interface of read-only
-// FrostFS LOCODE database.
-type DB interface {
-	// Get must find the record that corresponds to
-	// LOCODE and provide the Record interface.
-	//
-	// Must return an error if Record is nil.
-	//
-	// LOCODE is always non-nil.
-	Get(*locode.LOCODE) (Record, error)
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/validator.go b/pkg/innerring/processors/netmap/nodevalidation/locode/validator.go
deleted file mode 100644
index 47183423d..000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/validator.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package locode
-
-// Prm groups the required parameters of the Validator's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
-	// FrostFS LOCODE database interface.
-	//
-	// Must not be nil.
-	DB DB
-}
-
-// Validator is a utility that verifies and updates
-// node attributes associated with the node's geographical
-// location (LOCODE).
-//
-// For correct operation, the Validator must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Validator is immediately ready to work through its API.
-type Validator struct {
-	db DB
-}
-
-// New creates a new instance of the Validator.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Validator does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Validator {
-	return &Validator{
-		db: prm.DB,
-	}
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
deleted file mode 100644
index 0e4628ac7..000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package maddress
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// VerifyAndUpdate calls network.VerifyMultiAddress.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
-	err := network.VerifyMultiAddress(*n)
-	if err != nil {
-		return fmt.Errorf("could not verify multiaddress: %w", err)
-	}
-
-	return nil
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/validator.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/validator.go
deleted file mode 100644
index 8c351f70a..000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/validator.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package maddress
-
-// Validator is a utility that verifies node
-// multiaddresses.
-//
-// For correct operation, the Validator must be created
-// using the constructor (New). After successful creation,
-// the Validator is immediately ready to work through its API.
-type Validator struct{}
-
-// New creates a new instance of the Validator.
-//
-// The created Validator does not require additional
-// initialization and is completely ready for work.
-func New() *Validator {
-	return &Validator{}
-}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
deleted file mode 100644
index 03c41a451..000000000
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-Package state collects functionality for verifying states of network map members.
-
-NetMapCandidateValidator type provides an interface for checking the network
-map candidates.
-*/
-package state
-
-import (
-	"context"
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// ErrMaintenanceModeDisallowed is returned when maintenance mode is disallowed.
-var ErrMaintenanceModeDisallowed = errors.New("maintenance mode is disallowed")
-
-// NetworkSettings encapsulates current settings of the FrostFS network and
-// provides the interface used for processing the network map candidates.
-type NetworkSettings interface {
-	// MaintenanceModeAllowed checks if maintenance state of the storage nodes
-	// is allowed to be set, and returns:
-	// no error if allowed;
-	// ErrMaintenanceModeDisallowed if disallowed;
-	// other error if there are any problems with the check.
-	MaintenanceModeAllowed(ctx context.Context) error
-}
-
-// NetMapCandidateValidator represents a tool which checks the state of nodes
-// which are going to register in the FrostFS network (enter the network map).
-//
-// NetMapCandidateValidator can be instantiated using built-in var declaration
-// and currently doesn't require any additional initialization.
-//
-// NetMapCandidateValidator implements
-// github.com/nspcc-dev/frostfs-node/pkg/innerring/processors/netmap.NodeValidator.
-type NetMapCandidateValidator struct {
-	netSettings NetworkSettings
-}
-
-// SetNetworkSettings specifies the provider of the NetworkSettings interface.
-// MUST be called before any VerifyAndUpdate call. Parameter MUST NOT be nil.
-func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSettings) {
-	x.netSettings = netSettings
-}
-
-// VerifyAndUpdate checks the state of the network map candidate described by
-// the netmap.NodeInfo parameter. Returns no error if the status is correct,
-// otherwise returns an error describing a violation of the rules:
-//
-// status MUST be either ONLINE or MAINTENANCE;
-// if status is MAINTENANCE, then it SHOULD be allowed by the network.
-//
-// VerifyAndUpdate does not mutate the parameter in a binary format.
-// MUST NOT be called before SetNetworkSettings.
-//
-// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
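
The implementation of these rules follows below. First, a minimal caller-side sketch of the rule set, using only import paths that appear in this diff; the `disallowAll` stub and the `package main` scaffolding are mine, not part of the deleted code:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// disallowAll is a stub NetworkSettings that forbids maintenance mode.
type disallowAll struct{}

func (disallowAll) MaintenanceModeAllowed(context.Context) error {
	return state.ErrMaintenanceModeDisallowed
}

func main() {
	var v state.NetMapCandidateValidator
	v.SetNetworkSettings(disallowAll{})

	var online, maint netmap.NodeInfo
	online.SetStatus(netmap.Online)
	maint.SetStatus(netmap.Maintenance)

	ctx := context.Background()
	fmt.Println(v.VerifyAndUpdate(ctx, &online)) // <nil>: ONLINE always passes

	// MAINTENANCE is rejected because the stub settings disallow it.
	err := v.VerifyAndUpdate(ctx, &maint)
	fmt.Println(errors.Is(err, state.ErrMaintenanceModeDisallowed)) // true
}
```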
-func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error { - if node.Status().IsOnline() { - return nil - } - - if node.Status().IsMaintenance() { - return x.netSettings.MaintenanceModeAllowed(ctx) - } - - return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE") -} diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go deleted file mode 100644 index cbf48a710..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package state_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/stretchr/testify/require" -) - -// implements state.NetworkSettings for testing. -type testNetworkSettings struct { - disallowed bool -} - -func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error { - if x.disallowed { - return state.ErrMaintenanceModeDisallowed - } - - return nil -} - -func TestValidator_VerifyAndUpdate(t *testing.T) { - var vDefault state.NetMapCandidateValidator - var s testNetworkSettings - - vDefault.SetNetworkSettings(s) - - for _, testCase := range []struct { - name string - preparer func(*netmap.NodeInfo) // modifies zero instance - valid bool // is node valid after preparation - - validatorPreparer func(*state.NetMapCandidateValidator) // optionally modifies default validator - }{ - { - name: "UNDEFINED", - preparer: func(info *netmap.NodeInfo) {}, - valid: false, - }, - { - name: "ONLINE", - preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) }, - valid: true, - }, - { - name: "OFFLINE", - preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) }, - valid: false, - }, - { - name: "MAINTENANCE/allowed", - preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, - valid: true, - }, - { - name: "MAINTENANCE/disallowed", - preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, - valid: false, - validatorPreparer: func(v *state.NetMapCandidateValidator) { - var s testNetworkSettings - s.disallowed = true - - v.SetNetworkSettings(s) - }, - }, - } { - var node netmap.NodeInfo - - // prepare node - testCase.preparer(&node) - - // save binary representation for mutation check - binNode := node.Marshal() - - var v state.NetMapCandidateValidator - if testCase.validatorPreparer == nil { - v = vDefault - } else { - testCase.validatorPreparer(&v) - } - - err := v.VerifyAndUpdate(context.Background(), &node) - - if testCase.valid { - require.NoError(t, err, testCase.name) - } else { - require.Error(t, err, testCase.name) - } - - // check mutation - require.Equal(t, binNode, node.Marshal(), testCase.name) - } -} diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go deleted file mode 100644 index 3dbe98a8d..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/validator.go +++ /dev/null @@ -1,39 +0,0 @@ -package nodevalidation - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" - apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// CompositeValidator wraps `netmap.NodeValidator`s. -// -// For correct operation, CompositeValidator must be created -// using the constructor (New). 
After successful creation, -// the CompositeValidator is immediately ready to work through -// API. -type CompositeValidator struct { - validators []netmap.NodeValidator -} - -// New creates a new instance of the CompositeValidator. -// -// The created CompositeValidator does not require additional -// initialization and is completely ready for work. -func New(validators ...netmap.NodeValidator) *CompositeValidator { - return &CompositeValidator{validators} -} - -// VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators. -// -// If error appears, returns it immediately. -func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error { - for _, v := range c.validators { - if err := v.VerifyAndUpdate(ctx, ni); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go deleted file mode 100644 index 8f8cc17ff..000000000 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ /dev/null @@ -1,57 +0,0 @@ -package netmap - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" -) - -func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool { - if !np.alphabetState.IsAlphabet(ctx) { - np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) - - return true - } - - err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { - key, err := keys.NewPublicKeyFromString(s) - if err != nil { - np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode, - zap.String("key", s)) - - return nil - } - - np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) - - // In notary environments we call UpdateStateIR method instead of UpdateState. - // It differs from UpdateState only by name, so we can do this in the same form. - // See https://github.com/nspcc-dev/frostfs-contract/issues/225 - const methodUpdateStateNotary = "updateStateIR" - - err = np.netmapClient.MorphNotaryInvoke( - ctx, - np.netmapClient.ContractAddress(), - 0, - uint32(ev.epoch), - nil, - methodUpdateStateNotary, - int64(v2netmap.Offline), key.Bytes(), - ) - if err != nil { - np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) - } - - return nil - }) - if err != nil { - np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, - zap.Error(err)) - return false - } - - return true -} diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go deleted file mode 100644 index 7c78d24a5..000000000 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ /dev/null @@ -1,73 +0,0 @@ -package netmap - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "go.uber.org/zap" -) - -// Process new epoch notification by setting global epoch value and resetting -// local epoch timer. 
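
Before the epoch-processing functions below, it is worth illustrating how the validator packages above are meant to compose. A hedged sketch, with the `allowAll` stub and the printed outcome being my assumptions (an empty NodeInfo carries no multiaddress, so the maddress check would likely be the first to object):

```go
package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// allowAll is a stub NetworkSettings that always permits maintenance mode.
type allowAll struct{}

func (allowAll) MaintenanceModeAllowed(context.Context) error { return nil }

func main() {
	var sv state.NetMapCandidateValidator
	sv.SetNetworkSettings(allowAll{})

	// Validators run in registration order; the first error stops the chain.
	v := nodevalidation.New(maddress.New(), &sv)

	var ni netmap.NodeInfo
	ni.SetStatus(netmap.Online)

	// Prints the first failing check, if any.
	fmt.Println(v.VerifyAndUpdate(context.Background(), &ni))
}
```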
-func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
-	epoch := ev.EpochNumber()
-
-	epochDuration, err := np.netmapClient.EpochDuration(ctx)
-	if err != nil {
-		np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
-			zap.Error(err))
-	} else {
-		np.epochState.SetEpochDuration(epochDuration)
-	}
-
-	np.epochState.SetEpochCounter(epoch)
-
-	h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
-	if err != nil {
-		np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
-			zap.String("hash", ev.TxHash().StringLE()),
-			zap.Error(err))
-	}
-
-	if err := np.epochTimer.ResetEpochTimer(h); err != nil {
-		np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
-			zap.Error(err))
-	}
-
-	// get new netmap snapshot
-	networkMap, err := np.netmapClient.NetMap(ctx)
-	if err != nil {
-		np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
-			zap.Error(err))
-
-		return false
-	}
-
-	np.netmapSnapshot.update(*networkMap, epoch)
-	np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
-	np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
-	np.handleNotaryDeposit(ctx, ev)
-
-	return true
-}
-
-// Process new epoch tick by invoking the new epoch method in the network map contract.
-func (np *Processor) processNewEpochTick(ctx context.Context) bool {
-	if !np.alphabetState.IsAlphabet(ctx) {
-		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
-		return true
-	}
-
-	nextEpoch := np.epochState.EpochCounter() + 1
-	np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
-
-	err := np.netmapClient.NewEpoch(ctx, nextEpoch)
-	if err != nil {
-		np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
-		return false
-	}
-
-	return true
-}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
deleted file mode 100644
index b5c727cc7..000000000
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package netmap
-
-import (
-	"context"
-	"encoding/hex"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"go.uber.org/zap"
-)
-
-// Process add peer notification by sanity checking the new node.
-func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
-	if !np.alphabetState.IsAlphabet(ctx) {
-		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
-		return true
-	}
-
-	// check if the notary transaction is valid, see #976
-	tx := ev.NotaryRequest().MainTransaction
-	ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
-	if err != nil || !ok {
-		np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
-			zap.String("method", "netmap.AddPeer"),
-			zap.String("hash", tx.Hash().StringLE()),
-			zap.Error(err))
-		return false
-	}
-
-	// unmarshal node info
-	var nodeInfo netmap.NodeInfo
-	if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
-		// it would be nice to have the tx id in the event structure to log it
-		np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
-		return false
-	}
-
-	// validate and update node info
-	err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
-	if err != nil {
-		np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
-			zap.Error(err),
-		)
-
-		return false
-	}
-
-	// sort attributes to make them consistent
-	nodeInfo.SortAttributes()
-
-	// marshal updated node info structure
-	nodeInfoBinary := nodeInfo.Marshal()
-
-	keyString := netmap.StringifyPublicKey(nodeInfo)
-
-	updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)
-
-	// `processAddPeer` reacts to the `AddPeer` notification, `processNewEpoch` - to `NewEpoch`.
-	// These two notifications are produced in order - `NewEpoch` -> `AddPeer`.
-	// But there is no guarantee that the code will be executed in the same order.
-	// That is why we need to perform `addPeerIR` only when the node is online,
-	// because in the scope of this method, the contract sets the `ONLINE` state for the node.
-	if updated && nodeInfo.Status().IsOnline() {
-		np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
-			zap.String("key", keyString))
-
-		prm := netmapclient.AddPeerPrm{}
-		prm.SetNodeInfo(nodeInfo)
-
-		// In notary environments we call the AddPeerIR method instead of AddPeer.
-		// It differs from AddPeer only by name, so we can do this in the same form.
-		// See https://github.com/nspcc-dev/frostfs-contract/issues/154.
-		const methodAddPeerNotary = "addPeerIR"
-
-		// create new notary request with the original nonce
-		err = np.netmapClient.MorphNotaryInvoke(
-			ctx,
-			np.netmapClient.ContractAddress(),
-			0,
-			ev.NotaryRequest().MainTransaction.Nonce,
-			nil,
-			methodAddPeerNotary,
-			nodeInfoBinary,
-		)
-		if err != nil {
-			np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
-			return false
-		}
-	}
-
-	return true
-}
-
-// Process update peer notification by sending an approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool { - if !np.alphabetState.IsAlphabet(ctx) { - np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) - return true - } - - // flag node to remove from local view, so it can be re-bootstrapped - // again before new epoch will tick - np.netmapSnapshot.flag(hex.EncodeToString(ev.PublicKey().Bytes())) - - var err error - - if ev.Maintenance() { - err = np.nodeStateSettings.MaintenanceModeAllowed(ctx) - if err != nil { - np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, - zap.Error(err), - ) - - return false - } - } - - if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil { - np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) - return false - } - - return true -} diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go deleted file mode 100644 index 277bca1c3..000000000 --- a/pkg/innerring/processors/netmap/processor.go +++ /dev/null @@ -1,222 +0,0 @@ -package netmap - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" -) - -type ( - // EpochTimerReseter is a callback interface for tickers component. - EpochTimerReseter interface { - ResetEpochTimer(uint32) error - } - - // EpochState is a callback interface for inner ring global state. - EpochState interface { - SetEpochCounter(uint64) - EpochCounter() uint64 - SetEpochDuration(uint64) - EpochDuration() uint64 - } - - // AlphabetState is a callback interface for inner ring global state. - AlphabetState interface { - IsAlphabet(context.Context) bool - } - - // NodeValidator wraps basic method of checking the correctness - // of information about the node and its finalization for adding - // to the network map. - NodeValidator interface { - // VerifyAndUpdate must verify and optionally update NodeInfo structure. - // - // Must return an error if NodeInfo input is invalid. - // Must return an error if it is not possible to correctly - // change the structure for sending to the network map. - // - // If no error occurs, the parameter must point to the - // ready-made NodeInfo structure. 
-		VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
-	}
-
-	Client interface {
-		MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
-		ContractAddress() util.Uint160
-		EpochDuration(ctx context.Context) (uint64, error)
-		MorphTxHeight(h util.Uint256) (res uint32, err error)
-		NetMap(ctx context.Context) (*netmap.NetMap, error)
-		NewEpoch(ctx context.Context, epoch uint64) error
-		MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
-		MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
-	}
-
-	// Processor of events produced by the network map contract
-	// and by the new epoch ticker, which is related to the contract.
-	Processor struct {
-		log           *logger.Logger
-		metrics       metrics.Register
-		pool          *ants.Pool
-		epochTimer    EpochTimerReseter
-		epochState    EpochState
-		alphabetState AlphabetState
-
-		netmapClient Client
-
-		netmapSnapshot cleanupTable
-
-		handleAlphabetSync  event.Handler
-		handleNotaryDeposit event.Handler
-
-		nodeValidator NodeValidator
-
-		nodeStateSettings state.NetworkSettings
-	}
-
-	// Params of the processor constructor.
-	Params struct {
-		Log              *logger.Logger
-		Metrics          metrics.Register
-		PoolSize         int
-		NetmapClient     Client
-		EpochTimer       EpochTimerReseter
-		EpochState       EpochState
-		AlphabetState    AlphabetState
-		CleanupEnabled   bool
-		CleanupThreshold uint64 // in epochs
-
-		AlphabetSyncHandler  event.Handler
-		NotaryDepositHandler event.Handler
-
-		NodeValidator NodeValidator
-
-		NodeStateSettings state.NetworkSettings
-	}
-)
-
-const (
-	newEpochNotification = "NewEpoch"
-)
-
-// New creates a network map contract processor instance.
-func New(p *Params) (*Processor, error) {
-	switch {
-	case p.Log == nil:
-		return nil, errors.New("ir/netmap: logger is not set")
-	case p.EpochTimer == nil:
-		return nil, errors.New("ir/netmap: epoch timer is not set")
-	case p.EpochState == nil:
-		return nil, errors.New("ir/netmap: global state is not set")
-	case p.AlphabetState == nil:
-		return nil, errors.New("ir/netmap: alphabet state is not set")
-	case p.AlphabetSyncHandler == nil:
-		return nil, errors.New("ir/netmap: alphabet sync handler is not set")
-	case p.NotaryDepositHandler == nil:
-		return nil, errors.New("ir/netmap: notary deposit handler is not set")
-	case p.NodeValidator == nil:
-		return nil, errors.New("ir/netmap: node validator is not set")
-	case p.NodeStateSettings == nil:
-		return nil, errors.New("ir/netmap: node state settings is not set")
-	}
-
-	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
-	if err != nil {
-		return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
-	}
-
-	metricsRegister := p.Metrics
-	if metricsRegister == nil {
-		metricsRegister = metrics.DefaultRegister{}
-	}
-
-	return &Processor{
-		log:            p.Log,
-		metrics:        metricsRegister,
-		pool:           pool,
-		epochTimer:     p.EpochTimer,
-		epochState:     p.EpochState,
-		alphabetState:  p.AlphabetState,
-		netmapClient:   p.NetmapClient,
-		netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold),
-
-		handleAlphabetSync: p.AlphabetSyncHandler,
-
-		handleNotaryDeposit: p.NotaryDepositHandler,
-
-		nodeValidator: p.NodeValidator,
-
-		nodeStateSettings: p.NodeStateSettings,
-	}, nil
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
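
The listener wiring follows below. As an aside, a hedged sketch of what satisfying the constructor contract just shown might look like; every value passed into this hypothetical helper is assumed to be supplied by the caller, and the pool size and cleanup threshold are arbitrary:

```go
package wiring

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// buildProcessor maps one argument onto each field checked by New,
// so passing a nil here reproduces the corresponding error above.
func buildProcessor(
	log *logger.Logger,
	cli netmap.Client,
	timer netmap.EpochTimerReseter,
	est netmap.EpochState,
	alpha netmap.AlphabetState,
	onSync, onDeposit event.Handler,
	nv netmap.NodeValidator,
	nss state.NetworkSettings,
) (*netmap.Processor, error) {
	return netmap.New(&netmap.Params{
		Log: log,
		// Metrics may stay nil: New falls back to metrics.DefaultRegister.
		PoolSize:             10, // size of the non-blocking ants worker pool
		NetmapClient:         cli,
		EpochTimer:           timer,
		EpochState:           est,
		AlphabetState:        alpha,
		CleanupEnabled:       true,
		CleanupThreshold:     3, // in epochs (see cleanupTable)
		AlphabetSyncHandler:  onSync,
		NotaryDepositHandler: onDeposit,
		NodeValidator:        nv,
		NodeStateSettings:    nss,
	})
}
```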
-func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - return []event.NotificationHandlerInfo{ - { - Contract: np.netmapClient.ContractAddress(), - Type: newEpochNotification, - Parser: netmapEvent.ParseNewEpoch, - Handlers: []event.Handler{np.handleNewEpoch}, - }, - } -} - -// ListenerNotaryParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { - var ( - p event.NotaryParserInfo - - pp = make([]event.NotaryParserInfo, 0, 2) - ) - - p.SetMempoolType(mempoolevent.TransactionAdded) - p.SetScriptHash(np.netmapClient.ContractAddress()) - - // new peer - p.SetRequestType(netmapEvent.AddPeerNotaryEvent) - p.SetParser(netmapEvent.ParseAddPeerNotary) - pp = append(pp, p) - - // update state - p.SetRequestType(netmapEvent.UpdateStateNotaryEvent) - p.SetParser(netmapEvent.ParseUpdatePeerNotary) - pp = append(pp, p) - - return pp -} - -// ListenerNotaryHandlers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { - var ( - h event.NotaryHandlerInfo - - hh = make([]event.NotaryHandlerInfo, 0, 2) - ) - - h.SetMempoolType(mempoolevent.TransactionAdded) - h.SetScriptHash(np.netmapClient.ContractAddress()) - - // new peer - h.SetRequestType(netmapEvent.AddPeerNotaryEvent) - h.SetHandler(np.handleAddPeer) - hh = append(hh, h) - - // update state - h.SetRequestType(netmapEvent.UpdateStateNotaryEvent) - h.SetHandler(np.handleUpdateState) - hh = append(hh, h) - - return hh -} diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go deleted file mode 100644 index 310f12248..000000000 --- a/pkg/innerring/processors/netmap/wrappers.go +++ /dev/null @@ -1,63 +0,0 @@ -package netmap - -import ( - "context" - - netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -func NewNetmapClient(netmapClient *netmapclient.Client) Client { - return &netmapClientWrapper{ - netmapClient: netmapClient, - } -} - -type netmapClientWrapper struct { - netmapClient *netmapclient.Client -} - -func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error { - _, err := w.netmapClient.UpdatePeerState(ctx, p) - return err -} - -func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { - _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...) 
- return err -} - -func (w *netmapClientWrapper) ContractAddress() util.Uint160 { - return w.netmapClient.ContractAddress() -} - -func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) { - return w.netmapClient.EpochDuration(ctx) -} - -func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { - return w.netmapClient.Morph().TxHeight(h) -} - -func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) { - return w.netmapClient.NetMap(ctx) -} - -func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { - return w.netmapClient.NewEpoch(ctx, epoch) -} - -func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { - return w.netmapClient.Morph().IsValidScript(script, signers) -} - -func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error { - return w.netmapClient.AddPeer(ctx, p) -} - -func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { - return w.netmapClient.Morph().NotarySignAndInvokeTX(mainTx) -} diff --git a/pkg/innerring/processors/util.go b/pkg/innerring/processors/util.go deleted file mode 100644 index 364ffe25e..000000000 --- a/pkg/innerring/processors/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package processors - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" - "github.com/panjf2000/ants/v2" -) - -func SubmitEvent(pool *ants.Pool, metrics metrics.Register, eventLabel string, eventProcessor func() bool) error { - return pool.Submit(func() { - start := time.Now() - success := eventProcessor() - metrics.AddEvent(time.Since(start), eventLabel, success) - }) -} diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go deleted file mode 100644 index 0ef771359..000000000 --- a/pkg/innerring/state.go +++ /dev/null @@ -1,208 +0,0 @@ -package innerring - -import ( - "context" - "fmt" - "sort" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -const voteMethod = "vote" - -var ( - persistateMainChainLastBlockKey = []byte("main_chain_last_processed_block") - persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block") -) - -// EpochCounter is a getter for a global epoch counter. -func (s *Server) EpochCounter() uint64 { - return s.epochCounter.Load() -} - -// SetEpochCounter is a setter for contract processors to update global -// epoch counter. -func (s *Server) SetEpochCounter(val uint64) { - s.epochCounter.Store(val) - if s.irMetrics != nil { - s.irMetrics.SetEpoch(val) - } -} - -// EpochDuration is a getter for a global epoch duration. -func (s *Server) EpochDuration() uint64 { - return s.epochDuration.Load() -} - -// SetEpochDuration is a setter for the Netmap processor to update global -// epoch duration. -func (s *Server) SetEpochDuration(val uint64) { - s.epochDuration.Store(val) -} - -// IsActive is a getter for a global active flag state. -func (s *Server) IsActive(ctx context.Context) bool { - return s.InnerRingIndex(ctx) >= 0 -} - -// IsAlphabet is a getter for a global alphabet flag state. 
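
The IsAlphabet implementation and the other index getters continue below. First, a brief self-contained sketch of the SubmitEvent helper just shown, using only APIs visible in this diff; the pool size and event label are arbitrary, and the WaitGroup is only demo scaffolding:

```go
package main

import (
	"fmt"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
	"github.com/panjf2000/ants/v2"
)

func main() {
	// Non-blocking pool: Submit fails fast instead of stalling the event listener.
	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	var wg sync.WaitGroup
	wg.Add(1)
	err = processors.SubmitEvent(pool, metrics.DefaultRegister{}, "demo_event", func() bool {
		defer wg.Done()
		fmt.Println("handling event")
		return true // success flag, recorded together with the handler duration
	})
	if err != nil {
		// the pool is saturated: the event is dropped, which callers usually log
		fmt.Println("event dropped:", err)
		return
	}
	wg.Wait()
}
```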
-func (s *Server) IsAlphabet(ctx context.Context) bool {
-	return s.AlphabetIndex(ctx) >= 0
-}
-
-// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
-// index means that node is not in the inner ring list.
-func (s *Server) InnerRingIndex(ctx context.Context) int {
-	index, err := s.statusIndex.InnerRingIndex(ctx)
-	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
-		return -1
-	}
-
-	return int(index)
-}
-
-// InnerRingSize is a getter for a global size of inner ring list. This value
-// is paired with the inner ring index.
-func (s *Server) InnerRingSize(ctx context.Context) int {
-	size, err := s.statusIndex.InnerRingSize(ctx)
-	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
-		return 0
-	}
-
-	return int(size)
-}
-
-// AlphabetIndex is a getter for a global index of node in alphabet list.
-// Negative index means that node is not in the alphabet list.
-func (s *Server) AlphabetIndex(ctx context.Context) int {
-	index, err := s.statusIndex.AlphabetIndex(ctx)
-	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
-		return -1
-	}
-
-	return int(index)
-}
-
-func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
-	validators := prm.Validators
-
-	index := s.InnerRingIndex(ctx)
-	if s.contracts.alphabet.indexOutOfRange(index) {
-		s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
-
-		return nil
-	}
-
-	if len(validators) == 0 {
-		s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
-
-		return nil
-	}
-
-	epoch := s.EpochCounter()
-
-	var (
-		nonce uint32 = 1
-		vub   uint32
-		vubP  *uint32
-		err   error
-	)
-
-	if prm.Hash != nil {
-		nonce, vub, err = s.morphClient.CalculateNonceAndVUB(prm.Hash)
-		if err != nil {
-			return fmt.Errorf("could not calculate nonce and `validUntilBlock` values: %w", err)
-		}
-		vubP = &vub
-	}
-
-	s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
-		_, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
-		if err != nil {
-			s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
-				zap.Int8("alphabet_index", int8(letter)),
-				zap.Uint64("epoch", epoch),
-				zap.Error(err))
-		}
-	})
-
-	return nil
-}
-
-// VoteForSidechainValidator calls the vote method on alphabet contracts with
-// the provided list of keys.
-func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
-	sort.Sort(prm.Validators)
-	return s.voteForSidechainValidator(ctx, prm)
-}
-
-// ResetEpochTimer resets the block timer that produces events to update the epoch
-// counter in the netmap contract. It is used to synchronize this event production
-// based on the block with a notification of the last epoch.
-func (s *Server) ResetEpochTimer(h uint32) error {
-	s.epochTimer.Tick(h)
-	return s.epochTimer.Reset()
-}
-
-func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
-	s.healthStatus.Store(int32(hs))
-	s.notifySystemd(ctx, hs)
-	if s.irMetrics != nil {
-		s.irMetrics.SetHealth(int32(hs))
-	}
-}
-
-func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
-	if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
-		s.notifySystemd(ctx, newSt)
-		if s.irMetrics != nil {
-			s.irMetrics.SetHealth(int32(newSt))
-		}
-	}
-	return
-}
-
-// HealthStatus returns the current health status of the IR application.
-func (s *Server) HealthStatus() control.HealthStatus {
-	return control.HealthStatus(s.healthStatus.Load())
-}
-
-func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, error) {
-	persistPath := cfg.GetString("node.persistent_state.path")
-	persistStorage, err := state.NewPersistentStorage(persistPath)
-	if err != nil {
-		return nil, fmt.Errorf("persistent state init error: %w", err)
-	}
-
-	return persistStorage, nil
-}
-
-func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
-	if !s.sdNotify {
-		return
-	}
-	var err error
-	switch st {
-	case control.HealthStatus_READY:
-		err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)
-	case control.HealthStatus_SHUTTING_DOWN:
-		err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
-	case control.HealthStatus_RECONFIGURING:
-		err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled)
-	default:
-		err = sdnotify.Status(fmt.Sprintf("%v", st))
-	}
-	if err != nil {
-		s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
-	}
-}
diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go
deleted file mode 100644
index f60ca87c4..000000000
--- a/pkg/innerring/state_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package innerring
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/stretchr/testify/require"
-)
-
-func TestServerState(t *testing.T) {
-	keyStr := "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae"
-	commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr})
-	require.NoError(t, err, "convert string to committee public keys failed")
-	cf := &testCommiteeFetcher{
-		keys: commiteeKeys,
-	}
-
-	irKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr})
-	require.NoError(t, err, "convert string to IR public keys failed")
-	irf := &testIRFetcher{
-		keys: irKeys,
-	}
-
-	key, err := keys.NewPublicKeyFromString(keyStr)
-	require.NoError(t, err, "convert string to public key failed")
-
-	require.NoError(t, err, "failed to create morph client")
-	srv := &Server{
-		statusIndex: newInnerRingIndexer(cf, irf, key, time.Second),
-		morphClient: &client.Client{},
-	}
-
-	var epoch uint64 = 100
-	srv.SetEpochCounter(epoch)
-	require.Equal(t, epoch, srv.EpochCounter(), "invalid epoch counter")
-
-	var epochDuration uint64 = 15
-	srv.SetEpochDuration(epochDuration)
-	require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
-
-	var healthStatus control.HealthStatus = control.HealthStatus_READY
-	srv.setHealthStatus(context.Background(), healthStatus)
-	require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
-
-	require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
-	require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
-	require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
-	require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR size")
-	require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
-}
diff --git a/pkg/innerring/timers/alphabet.go b/pkg/innerring/timers/alphabet.go
deleted file mode 100644
index d4ac29e5a..000000000
--- a/pkg/innerring/timers/alphabet.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package timers
-
-// NewAlphabetEmitTick is an event for gas emission from alphabet contract.
-type NewAlphabetEmitTick struct{}
-
-// MorphEvent implements Event interface.
-func (NewAlphabetEmitTick) MorphEvent() {}
diff --git a/pkg/innerring/timers/epoch.go b/pkg/innerring/timers/epoch.go
deleted file mode 100644
index 5ba2e404e..000000000
--- a/pkg/innerring/timers/epoch.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package timers
-
-// NewEpochTick is a new epoch local ticker event.
-type NewEpochTick struct{}
-
-// MorphEvent implements Event interface.
-func (NewEpochTick) MorphEvent() {}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
deleted file mode 100644
index a6c40f9fa..000000000
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package blobovnicza
-
-import (
-	"io/fs"
-	"os"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"go.etcd.io/bbolt"
-	"go.uber.org/zap"
-)
-
-// Blobovnicza represents the implementation of FrostFS Blobovnicza.
-type Blobovnicza struct {
-	cfg
-
-	dataSize   atomic.Uint64
-	itemsCount atomic.Uint64
-
-	boltDB *bbolt.DB
-
-	opened     bool
-	controlMtx sync.Mutex
-}
-
-// Option is an option of Blobovnicza's constructor.
-type Option func(*cfg)
-
-type cfg struct {
-	boltDBCfg
-
-	fullSizeLimit uint64
-
-	objSizeLimit uint64
-
-	log *logger.Logger
-
-	metrics Metrics
-}
-
-type boltDBCfg struct {
-	perm fs.FileMode
-
-	path string
-
-	boltOptions *bbolt.Options
-}
-
-func defaultCfg(c *cfg) {
-	*c = cfg{
-		boltDBCfg: boltDBCfg{
-			perm: os.ModePerm, // 0777
-			boltOptions: &bbolt.Options{
-				Timeout: 100 * time.Millisecond,
-			},
-		},
-		fullSizeLimit: 1 << 30, // 1GB
-		objSizeLimit:  1 << 20, // 1MB
-		log:           logger.NewLoggerWrapper(zap.L()),
-		metrics:       &NoopMetrics{},
-	}
-}
-
-// New creates and returns a new Blobovnicza instance.
-func New(opts ...Option) *Blobovnicza {
-	var b Blobovnicza
-
-	defaultCfg(&b.cfg)
-
-	for i := range opts {
-		opts[i](&b.cfg)
-	}
-
-	return &b
-}
-
-// WithPath returns option to set system path to Blobovnicza.
-func WithPath(path string) Option {
-	return func(c *cfg) {
-		c.path = path
-	}
-}
-
-// WithPermissions returns an option to specify permission bits
-// of Blobovnicza's system path.
-func WithPermissions(perm fs.FileMode) Option {
-	return func(c *cfg) {
-		c.perm = perm
-	}
-}
-
-// WithObjectSizeLimit returns an option to specify the maximum size
-// of the objects stored in Blobovnicza.
-func WithObjectSizeLimit(lim uint64) Option {
-	return func(c *cfg) {
-		c.objSizeLimit = lim
-	}
-}
-
-// WithFullSizeLimit returns an option to set the maximum sum size
-// of all stored objects.
-func WithFullSizeLimit(lim uint64) Option {
-	return func(c *cfg) {
-		c.fullSizeLimit = lim
-	}
-}
-
-// WithLogger returns an option to specify Blobovnicza's logger.
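
The remaining options and a test file exercising the full lifecycle follow. Condensed into one hedged helper (the path argument and the size limits are placeholders, and the helper name is mine):

```go
package storagedemo

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
)

// openBlobovnicza shows the construct -> Open -> Init lifecycle.
func openBlobovnicza(ctx context.Context, path string) (*blobovnicza.Blobovnicza, error) {
	blz := blobovnicza.New(
		blobovnicza.WithPath(path),
		blobovnicza.WithObjectSizeLimit(1<<20), // 1 MiB per stored object
		blobovnicza.WithFullSizeLimit(1<<30),   // 1 GiB for the whole DB
	)

	if err := blz.Open(ctx); err != nil { // creates the file if missing
		return nil, err
	}
	if err := blz.Init(ctx); err != nil { // prepares the size-range buckets
		_ = blz.Close(ctx)
		return nil, err
	}
	return blz, nil
}
```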
-func WithLogger(l *logger.Logger) Option {
-	return func(c *cfg) {
-		c.log = l
-	}
-}
-
-// WithReadOnly returns an option to open Blobovnicza in read-only mode.
-func WithReadOnly(ro bool) Option {
-	return func(c *cfg) {
-		c.boltOptions.ReadOnly = ro
-	}
-}
-
-// WithMetrics returns an option to set metrics storage.
-func WithMetrics(m Metrics) Option {
-	return func(c *cfg) {
-		c.metrics = m
-	}
-}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
deleted file mode 100644
index 95fdd844b..000000000
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package blobovnicza
-
-import (
-	"context"
-	"crypto/rand"
-	"os"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	"github.com/stretchr/testify/require"
-)
-
-func testPutGet(t *testing.T, blz *Blobovnicza, addr oid.Address, sz uint64, assertErrPut, assertErrGet func(error) bool) oid.Address {
-	// create binary object
-	data := make([]byte, sz)
-	rand.Read(data)
-
-	var pPut PutPrm
-	pPut.SetAddress(addr)
-	pPut.SetMarshaledObject(data)
-	_, err := blz.Put(context.Background(), pPut)
-	if assertErrPut != nil {
-		require.True(t, assertErrPut(err))
-	} else {
-		require.NoError(t, err)
-	}
-
-	if assertErrGet != nil {
-		testGet(t, blz, addr, data, assertErrGet)
-	}
-
-	return addr
-}
-
-func testGet(t *testing.T, blz *Blobovnicza, addr oid.Address, expObj []byte, assertErr func(error) bool) {
-	var pGet GetPrm
-	pGet.SetAddress(addr)
-
-	// try to read object from Blobovnicza
-	res, err := blz.Get(context.Background(), pGet)
-	if assertErr != nil {
-		require.True(t, assertErr(err))
-	} else {
-		require.NoError(t, err)
-	}
-
-	if assertErr == nil {
-		require.Equal(t, expObj, res.Object())
-	}
-}
-
-func TestBlobovnicza(t *testing.T) {
-	p := "./test_blz"
-
-	sizeLim := uint64(256 * 1 << 10) // 256KB
-	objSizeLim := sizeLim / 2
-
-	// create Blobovnicza instance
-	blz := New(
-		WithPath(p),
-		WithObjectSizeLimit(objSizeLim),
-		WithFullSizeLimit(sizeLim),
-		WithLogger(test.NewLogger(t)),
-	)
-
-	defer os.Remove(p)
-
-	// open Blobovnicza
-	require.NoError(t, blz.Open(context.Background()))
-
-	// initialize Blobovnicza
-	require.NoError(t, blz.Init(context.Background()))
-
-	// try to read non-existent address
-	testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
-
-	filled := uint64(15 * 1 << 10)
-
-	// test object 15KB
-	addr := testPutGet(t, blz, oidtest.Address(), filled, nil, nil)
-
-	// remove the object
-	var dPrm DeletePrm
-	dPrm.SetAddress(addr)
-
-	_, err := blz.Delete(context.Background(), dPrm)
-	require.NoError(t, err)
-
-	// should return 404
-	testGet(t, blz, addr, nil, client.IsErrObjectNotFound)
-
-	// fill Blobovnicza fully
-	for ; filled < sizeLim; filled += objSizeLim {
-		testPutGet(t, blz, oidtest.Address(), objSizeLim, nil, nil)
-	}
-
-	// blobovnicza accepts objects even if full
-	testPutGet(t, blz, oidtest.Address(), 1024, func(err error) bool {
-		return err == nil
-	}, nil)
-
-	require.NoError(t, blz.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
deleted file mode 100644
index 4947512cc..000000000
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ /dev/null
@@
-1,183 +0,0 @@ -package blobovnicza - -import ( - "context" - "errors" - "fmt" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -// Open opens an internal database at the configured path with the configured permissions. -// -// If the database file does not exist, it will be created automatically. -// If blobovnicza is already open, does nothing. -func (b *Blobovnicza) Open(ctx context.Context) error { - b.controlMtx.Lock() - defer b.controlMtx.Unlock() - - if b.opened { - return nil - } - - b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB, - zap.String("path", b.path), - zap.Bool("ro", b.boltOptions.ReadOnly), - ) - - var err error - - if !b.boltOptions.ReadOnly { - err = util.MkdirAllX(filepath.Dir(b.path), b.perm) - if err != nil { - return err - } - } - - b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB, - zap.String("path", b.path), - zap.Stringer("permissions", b.perm), - ) - - b.boltDB, err = bbolt.Open(b.path, b.perm, b.boltOptions) - if err == nil { - b.opened = true - b.metrics.IncOpenBlobovniczaCount() - } - - return err -} - -// Init initializes internal database structure. -// -// If Blobovnicza is already initialized, no action is taken. -// Blobovnicza must be open, otherwise an error will return. -func (b *Blobovnicza) Init(ctx context.Context) error { - b.controlMtx.Lock() - defer b.controlMtx.Unlock() - - if !b.opened { - return errors.New("blobovnicza is not open") - } - - b.log.Debug(ctx, logs.BlobovniczaInitializing, - zap.Uint64("object size limit", b.objSizeLimit), - zap.Uint64("storage size limit", b.fullSizeLimit), - ) - - size := b.dataSize.Load() - items := b.itemsCount.Load() - if size != 0 || items != 0 { - b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) - return nil - } - - if !b.boltOptions.ReadOnly { - err := b.boltDB.Update(func(tx *bbolt.Tx) error { - return b.iterateBucketKeys(true, func(lower, upper uint64, key []byte) (bool, error) { - // create size range bucket - - rangeStr := stringifyBounds(lower, upper) - b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange, - zap.String("range", rangeStr)) - - _, err := tx.CreateBucketIfNotExists(key) - if err != nil { - return false, fmt.Errorf("(%T) could not create bucket for bounds %s: %w", - b, rangeStr, err) - } - - return false, nil - }) - }) - if err != nil { - return err - } - } - - return b.initializeCounters(ctx) -} - -func (b *Blobovnicza) ObjectsCount() uint64 { - return b.itemsCount.Load() -} - -func (b *Blobovnicza) initializeCounters(ctx context.Context) error { - var size uint64 - var items uint64 - var sizeExists bool - var itemsCountExists bool - - err := b.boltDB.View(func(tx *bbolt.Tx) error { - size, sizeExists = hasDataSize(tx) - items, itemsCountExists = hasItemsCount(tx) - - if sizeExists && itemsCountExists { - return nil - } - - return b.iterateAllDataBuckets(tx, func(_, _ uint64, b *bbolt.Bucket) (bool, error) { - return false, b.ForEach(func(k, v []byte) error { - size += uint64(len(k) + len(v)) - items++ - return nil - }) - }) - }) - if err != nil { - return fmt.Errorf("determine DB size: %w", err) - } - if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { - b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) - if err := b.boltDB.Update(func(tx *bbolt.Tx) error { - if err := saveDataSize(tx, size); err != nil 
{ - return err - } - return saveItemsCount(tx, items) - }); err != nil { - b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) - return fmt.Errorf("save blobovnicza's size and items count: %w", err) - } - b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) - } - - b.dataSize.Store(size) - b.itemsCount.Store(items) - b.metrics.AddOpenBlobovniczaSize(size) - b.metrics.AddOpenBlobovniczaItems(items) - return nil -} - -// Close releases all internal database resources. -// -// If blobovnicza is already closed, does nothing. -func (b *Blobovnicza) Close(ctx context.Context) error { - b.controlMtx.Lock() - defer b.controlMtx.Unlock() - - if !b.opened { - return nil - } - - b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB, - zap.String("path", b.path), - ) - - if err := b.boltDB.Close(); err != nil { - return err - } - - b.metrics.DecOpenBlobovniczaCount() - b.metrics.SubOpenBlobovniczaSize(b.dataSize.Load()) - b.metrics.SubOpenBlobovniczaItems(b.itemsCount.Load()) - b.dataSize.Store(0) - b.itemsCount.Store(0) - - b.opened = false - - return nil -} diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go deleted file mode 100644 index 8f24b5675..000000000 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ /dev/null @@ -1,105 +0,0 @@ -package blobovnicza - -import ( - "context" - "errors" - "syscall" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// DeletePrm groups the parameters of Delete operation. -type DeletePrm struct { - addr oid.Address -} - -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} - -// SetAddress sets the address of the requested object. -func (p *DeletePrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// Delete removes an object from Blobovnicza by address. -// -// Returns any error encountered that -// did not allow to completely delete the object. -// -// Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza. -// -// Should not be called in read-only configuration. 
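
The Delete implementation follows. A small caller-side sketch of the contract described above; the helper name is hypothetical, and only types shown in this diff are used:

```go
package storagedemo

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// deleteIfPresent treats "object not found" as a no-op; other errors
// (including blobovnicza.ErrNoSpace on ENOSPC) are passed through.
func deleteIfPresent(ctx context.Context, blz *blobovnicza.Blobovnicza, addr oid.Address) error {
	var prm blobovnicza.DeletePrm
	prm.SetAddress(addr)

	_, err := blz.Delete(ctx, prm)
	if err != nil && client.IsErrObjectNotFound(err) {
		return nil // nothing was stored under this address
	}
	return err
}
```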
-func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", prm.addr.EncodeToString()), - )) - defer span.End() - - addrKey := addressKey(prm.addr) - - found := false - var sizeUpperBound uint64 - var sizeLowerBound uint64 - var dataSize uint64 - var recordSize uint64 - - err := b.boltDB.Update(func(tx *bbolt.Tx) error { - err := b.iterateAllDataBuckets(tx, func(lower, upper uint64, buck *bbolt.Bucket) (bool, error) { - objData := buck.Get(addrKey) - if objData == nil { - // object is not in bucket => continue iterating - return false, nil - } - dataSize = uint64(len(objData)) - sizeLowerBound = lower - sizeUpperBound = upper - recordSize = dataSize + uint64(len(addrKey)) - found = true - return true, buck.Delete(addrKey) - }) - if err != nil { - return err - } - if found { - return updateMeta(tx, func(count, size uint64) (uint64, uint64) { - if count > 0 { - count-- - } - if size >= recordSize { - size -= recordSize - } else { - size = 0 - } - return count, size - }) - } - return nil - }) - - if err == nil && !found { - return DeleteRes{}, new(apistatus.ObjectNotFound) - } - - if err == nil && found { - b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, - zap.String("binary size", stringifyByteSize(dataSize)), - zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), - ) - b.itemDeleted(recordSize) - } - - if errors.Is(err, syscall.ENOSPC) { - err = ErrNoSpace - } - - return DeleteRes{}, err -} diff --git a/pkg/local_object_storage/blobovnicza/errors.go b/pkg/local_object_storage/blobovnicza/errors.go deleted file mode 100644 index cff8c1776..000000000 --- a/pkg/local_object_storage/blobovnicza/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package blobovnicza - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - -// ErrNoSpace returned if blobovnicza failed to perform an operation because of syscall.ENOSPC. -var ErrNoSpace = logicerr.New("no space left on device with blobovnicza") diff --git a/pkg/local_object_storage/blobovnicza/exists.go b/pkg/local_object_storage/blobovnicza/exists.go deleted file mode 100644 index f7bc84d4a..000000000 --- a/pkg/local_object_storage/blobovnicza/exists.go +++ /dev/null @@ -1,43 +0,0 @@ -package blobovnicza - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Exists check if object with the specified address is stored in b. 
-func (b *Blobovnicza) Exists(ctx context.Context, addr oid.Address) (bool, error) { - exists := false - - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Exists", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - addrKey := addressKey(addr) - - err := b.boltDB.View(func(tx *bbolt.Tx) error { - return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error { - if isNonDataBucket(bucketName) { - return nil - } - exists = buck.Get(addrKey) != nil - if exists { - return errInterruptForEach - } - return nil - }) - }) - - if err == errInterruptForEach { - err = nil - } - return exists, err -} diff --git a/pkg/local_object_storage/blobovnicza/get.go b/pkg/local_object_storage/blobovnicza/get.go deleted file mode 100644 index 600323f55..000000000 --- a/pkg/local_object_storage/blobovnicza/get.go +++ /dev/null @@ -1,85 +0,0 @@ -package blobovnicza - -import ( - "bytes" - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// GetPrm groups the parameters of Get operation. -type GetPrm struct { - addr oid.Address -} - -// GetRes groups the resulting values of Get operation. -type GetRes struct { - obj []byte -} - -// SetAddress sets the address of the requested object. -func (p *GetPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// Object returns binary representation of the requested object. -func (p GetRes) Object() []byte { - return p.obj -} - -// special error for normal bbolt.Tx.ForEach interruption. -var errInterruptForEach = errors.New("interrupt for-each") - -// Get reads an object from Blobovnicza by address. -// -// Returns any error encountered that -// did not allow to completely read the object. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is not -// presented in Blobovnicza. 
-func (b *Blobovnicza) Get(ctx context.Context, prm GetPrm) (GetRes, error) { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Get", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", prm.addr.EncodeToString()), - )) - defer span.End() - - var ( - data []byte - addrKey = addressKey(prm.addr) - ) - - if err := b.boltDB.View(func(tx *bbolt.Tx) error { - return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error { - if isNonDataBucket(bucketName) { - return nil - } - - data = buck.Get(addrKey) - if data == nil { - return nil - } - - data = bytes.Clone(data) - - return errInterruptForEach - }) - }); err != nil && err != errInterruptForEach { - return GetRes{}, err - } - - if data == nil { - return GetRes{}, new(apistatus.ObjectNotFound) - } - - return GetRes{ - obj: data, - }, nil -} diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go deleted file mode 100644 index 5a382c159..000000000 --- a/pkg/local_object_storage/blobovnicza/get_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package blobovnicza - -import ( - "context" - "path/filepath" - "testing" - - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestBlobovnicza_Get(t *testing.T) { - t.Run("re-configure object size limit", func(t *testing.T) { - filename := filepath.Join(t.TempDir(), "blob") - - var blz *Blobovnicza - defer func() { require.NoError(t, blz.Close(context.Background())) }() - - fnInit := func(szLimit uint64) { - if blz != nil { - require.NoError(t, blz.Close(context.Background())) - } - - blz = New( - WithPath(filename), - WithObjectSizeLimit(szLimit), - ) - - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - } - - // initial distribution: [0:32K] (32K:64K] - fnInit(2 * firstBucketBound) - - addr := oidtest.Address() - obj := make([]byte, firstBucketBound+1) - - exists, err := blz.Exists(context.Background(), addr) - require.NoError(t, err) - require.False(t, exists) - - var prmPut PutPrm - prmPut.SetAddress(addr) - prmPut.SetMarshaledObject(obj) - - // place object to [32K:64K] bucket - _, err = blz.Put(context.Background(), prmPut) - require.NoError(t, err) - - var prmGet GetPrm - prmGet.SetAddress(addr) - - checkObj := func() { - res, err := blz.Get(context.Background(), prmGet) - require.NoError(t, err) - require.Equal(t, obj, res.Object()) - - exists, err := blz.Exists(context.Background(), addr) - require.NoError(t, err) - require.True(t, exists) - } - - // object should be available - checkObj() - - // new distribution (extended): [0:32K] (32K:64K] (64K:128K] - fnInit(3 * firstBucketBound) - - // object should still be available - checkObj() - - // new distribution (shrunk): [0:32K] - fnInit(firstBucketBound) - - // object should still be available - checkObj() - }) -} diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go deleted file mode 100644 index cd33b263c..000000000 --- a/pkg/local_object_storage/blobovnicza/iterate.go +++ /dev/null @@ -1,181 +0,0 @@ -package blobovnicza - -import ( - "bytes" - "context" - "fmt" - "math" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// iterateAllDataBuckets iterates over all buckets in the DB. -// -// If the
maximum object size (b.objSizeLimit) has been changed to a lower value, -// then there may be more buckets than the current object size limit implies. -func (b *Blobovnicza) iterateAllDataBuckets(tx *bbolt.Tx, f func(uint64, uint64, *bbolt.Bucket) (bool, error)) error { - return b.iterateBucketKeys(false, func(lower uint64, upper uint64, key []byte) (bool, error) { - buck := tx.Bucket(key) - if buck == nil { - return true, nil - } - - return f(lower, upper, buck) - }) -} - -func (b *Blobovnicza) iterateBucketKeys(useObjLimitBound bool, f func(uint64, uint64, []byte) (bool, error)) error { - return b.iterateBounds(useObjLimitBound, func(lower, upper uint64) (bool, error) { - return f(lower, upper, bucketKeyFromBounds(upper)) - }) -} - -func (b *Blobovnicza) iterateBounds(useObjLimitBound bool, f func(uint64, uint64) (bool, error)) error { - var objLimitBound uint64 = math.MaxUint64 - if useObjLimitBound { - objLimitBound = upperPowerOfTwo(b.objSizeLimit) - } - - for upper := firstBucketBound; upper <= max(objLimitBound, firstBucketBound); upper *= 2 { - var lower uint64 - - if upper != firstBucketBound { - lower = upper/2 + 1 - } - - if stop, err := f(lower, upper); err != nil { - return err - } else if stop { - break - } - } - - return nil -} - -// IterationElement represents a unit of iteration that the Iterate operation passes to the handler. -type IterationElement struct { - addr oid.Address - - data []byte -} - -// ObjectData returns the stored object in binary representation. -func (x IterationElement) ObjectData() []byte { - return x.data -} - -// Address returns the address of the stored object. -func (x IterationElement) Address() oid.Address { - return x.addr -} - -// IterationHandler is a generic processor of IterationElement. -type IterationHandler func(IterationElement) error - -// IteratePrm groups the parameters of Iterate operation. -type IteratePrm struct { - decodeAddresses bool - - withoutData bool - - handler IterationHandler - - ignoreErrors bool -} - -// DecodeAddresses sets the flag to unmarshal object addresses. -func (x *IteratePrm) DecodeAddresses() { - x.decodeAddresses = true -} - -// WithoutData sets the flag not to read object data. -func (x *IteratePrm) WithoutData() { - x.withoutData = true -} - -// SetHandler sets the handler to be called iteratively. -func (x *IteratePrm) SetHandler(h IterationHandler) { - x.handler = h -} - -// IgnoreErrors causes all errors to be ignored. -func (x *IteratePrm) IgnoreErrors() { - x.ignoreErrors = true -} - -// IterateRes groups the resulting values of Iterate operation. -type IterateRes struct{} - -// Iterate goes through all stored objects and passes an IterationElement to the parameterized handler until the handler returns an error. -// -// Decodes object addresses if DecodeAddresses was called. Doesn't read object data if WithoutData was called. -// -// Returns handler's errors directly. Returns nil after the iteration finishes. -// -// Handler should not retain object data. Handler must not be nil.
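A minimal sketch of the handler-based iteration described above (this is essentially what the IterateAddresses helper below wraps); blz and ctx are assumed:

	var prm blobovnicza.IteratePrm
	prm.DecodeAddresses()
	prm.WithoutData() // addresses only, object data is not cloned
	prm.SetHandler(func(elem blobovnicza.IterationElement) error {
		fmt.Println(elem.Address().EncodeToString())
		return nil // a non-nil error would abort the iteration
	})
	if _, err := blz.Iterate(ctx, prm); err != nil {
		return err
	}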
-func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Iterate", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.Bool("decode_addresses", prm.decodeAddresses), - attribute.Bool("without_data", prm.withoutData), - attribute.Bool("ignore_errors", prm.ignoreErrors), - )) - defer span.End() - - var elem IterationElement - - if err := b.boltDB.View(func(tx *bbolt.Tx) error { - return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error { - if isNonDataBucket(bucketName) { - return nil - } - return buck.ForEach(func(k, v []byte) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if prm.decodeAddresses { - if err := addressFromKey(&elem.addr, k); err != nil { - if prm.ignoreErrors { - return nil - } - return fmt.Errorf("decode address key: %w", err) - } - } - - if !prm.withoutData { - elem.data = bytes.Clone(v) - } - - return prm.handler(elem) - }) - }) - }); err != nil { - return IterateRes{}, err - } - - return IterateRes{}, nil -} - -// IterateAddresses is a helper function which iterates over Blobovnicza and passes addresses of the objects to f. -func IterateAddresses(ctx context.Context, blz *Blobovnicza, f func(oid.Address) error) error { - var prm IteratePrm - - prm.DecodeAddresses() - prm.WithoutData() - - prm.SetHandler(func(elem IterationElement) error { - return f(elem.Address()) - }) - - _, err := blz.Iterate(ctx, prm) - - return err -} diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go deleted file mode 100644 index 717274781..000000000 --- a/pkg/local_object_storage/blobovnicza/iterate_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package blobovnicza - -import ( - "bytes" - "context" - "errors" - "path/filepath" - "testing" - - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -func TestBlobovniczaIterate(t *testing.T) { - filename := filepath.Join(t.TempDir(), "blob") - b := New(WithPath(filename)) - require.NoError(t, b.Open(context.Background())) - require.NoError(t, b.Init(context.Background())) - - data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}} - addr := oidtest.Address() - _, err := b.Put(context.Background(), PutPrm{addr: addr, objData: data[0]}) - require.NoError(t, err) - - require.NoError(t, b.boltDB.Update(func(tx *bbolt.Tx) error { - buck := tx.Bucket(bucketKeyFromBounds(firstBucketBound)) - return buck.Put([]byte("invalid address"), data[1]) - })) - - seen := make([][]byte, 0, 2) - inc := func(e IterationElement) error { - seen = append(seen, bytes.Clone(e.data)) - return nil - } - - _, err = b.Iterate(context.Background(), IteratePrm{handler: inc}) - require.NoError(t, err) - require.ElementsMatch(t, seen, data) - - seen = seen[:0] - _, err = b.Iterate(context.Background(), IteratePrm{handler: inc, decodeAddresses: true}) - require.Error(t, err) - - seen = seen[:0] - _, err = b.Iterate(context.Background(), IteratePrm{handler: inc, decodeAddresses: true, ignoreErrors: true}) - require.NoError(t, err) - require.ElementsMatch(t, seen, data[:1]) - - seen = seen[:0] - expectedErr := errors.New("stop iteration") - _, err = b.Iterate(context.Background(), IteratePrm{ - decodeAddresses: true, - handler: func(IterationElement) error { return expectedErr }, - ignoreErrors: true, - }) - require.ErrorIs(t, err, expectedErr) -} diff --git a/pkg/local_object_storage/blobovnicza/meta.go 
b/pkg/local_object_storage/blobovnicza/meta.go deleted file mode 100644 index 3316d4666..000000000 --- a/pkg/local_object_storage/blobovnicza/meta.go +++ /dev/null @@ -1,104 +0,0 @@ -package blobovnicza - -import ( - "bytes" - "encoding/binary" - - "go.etcd.io/bbolt" -) - -const ( - dataSizeAndItemsCountBufLength = 8 -) - -var ( - metaBucketName = []byte("META") - dataSizeKey = []byte("data_size") - itemsCountKey = []byte("items_count") -) - -func isNonDataBucket(bucketName []byte) bool { - return bytes.Equal(bucketName, incompletedMoveBucketName) || bytes.Equal(bucketName, metaBucketName) -} - -func hasDataSize(tx *bbolt.Tx) (uint64, bool) { - b := tx.Bucket(metaBucketName) - if b == nil { - return 0, false - } - v := b.Get(dataSizeKey) - if v == nil { - return 0, false - } - if len(v) != dataSizeAndItemsCountBufLength { - return 0, false - } - return binary.LittleEndian.Uint64(v), true -} - -func hasItemsCount(tx *bbolt.Tx) (uint64, bool) { - b := tx.Bucket(metaBucketName) - if b == nil { - return 0, false - } - v := b.Get(itemsCountKey) - if v == nil { - return 0, false - } - if len(v) != dataSizeAndItemsCountBufLength { - return 0, false - } - return binary.LittleEndian.Uint64(v), true -} - -func saveDataSize(tx *bbolt.Tx, size uint64) error { - b, err := tx.CreateBucketIfNotExists(metaBucketName) - if err != nil { - return err - } - buf := make([]byte, dataSizeAndItemsCountBufLength) - binary.LittleEndian.PutUint64(buf, size) - return b.Put(dataSizeKey, buf) -} - -func saveItemsCount(tx *bbolt.Tx, count uint64) error { - b, err := tx.CreateBucketIfNotExists(metaBucketName) - if err != nil { - return err - } - buf := make([]byte, dataSizeAndItemsCountBufLength) - binary.LittleEndian.PutUint64(buf, count) - return b.Put(itemsCountKey, buf) -} - -func updateMeta(tx *bbolt.Tx, updateValues func(count, size uint64) (uint64, uint64)) error { - b, err := tx.CreateBucketIfNotExists(metaBucketName) - if err != nil { - return err - } - - var count uint64 - var size uint64 - - v := b.Get(itemsCountKey) - if v != nil { - count = binary.LittleEndian.Uint64(v) - } - - v = b.Get(dataSizeKey) - if v != nil { - size = binary.LittleEndian.Uint64(v) - } - - count, size = updateValues(count, size) - - sizeBuf := make([]byte, dataSizeAndItemsCountBufLength) - binary.LittleEndian.PutUint64(sizeBuf, size) - if err := b.Put(dataSizeKey, sizeBuf); err != nil { - return err - } - - countBuf := make([]byte, dataSizeAndItemsCountBufLength) - binary.LittleEndian.PutUint64(countBuf, count) - return b.Put(itemsCountKey, countBuf) -} diff --git a/pkg/local_object_storage/blobovnicza/metrics.go b/pkg/local_object_storage/blobovnicza/metrics.go deleted file mode 100644 index 37352b083..000000000 --- a/pkg/local_object_storage/blobovnicza/metrics.go +++ /dev/null @@ -1,21 +0,0 @@ -package blobovnicza - -type Metrics interface { - IncOpenBlobovniczaCount() - DecOpenBlobovniczaCount() - - AddOpenBlobovniczaSize(size uint64) - SubOpenBlobovniczaSize(size uint64) - - AddOpenBlobovniczaItems(items uint64) - SubOpenBlobovniczaItems(items uint64) -} - -type NoopMetrics struct{} - -func (m *NoopMetrics) IncOpenBlobovniczaCount() {} -func (m *NoopMetrics) DecOpenBlobovniczaCount() {} -func (m *NoopMetrics) AddOpenBlobovniczaSize(uint64) {} -func (m *NoopMetrics) SubOpenBlobovniczaSize(uint64) {} -func (m *NoopMetrics) AddOpenBlobovniczaItems(uint64) {} -func (m *NoopMetrics) SubOpenBlobovniczaItems(uint64) {} diff --git a/pkg/local_object_storage/blobovnicza/move.go b/pkg/local_object_storage/blobovnicza/move.go deleted file 
mode 100644 index 420e22a48..000000000 --- a/pkg/local_object_storage/blobovnicza/move.go +++ /dev/null @@ -1,119 +0,0 @@ -package blobovnicza - -import ( - "context" - "errors" - "fmt" - "syscall" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var incompletedMoveBucketName = []byte("INCOMPLETED_MOVE") - -type MoveInfo struct { - Address oid.Address - TargetStorageID []byte -} - -func (b *Blobovnicza) PutMoveInfo(ctx context.Context, prm MoveInfo) error { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.PutMoveInfo", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("target_storage_id", string(prm.TargetStorageID)), - )) - defer span.End() - - key := addressKey(prm.Address) - - err := b.boltDB.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(incompletedMoveBucketName) - if err != nil { - return err - } - - if err := bucket.Put(key, prm.TargetStorageID); err != nil { - return fmt.Errorf("(%T) failed to save move info: %w", b, err) - } - - return nil - }) - - if errors.Is(err, syscall.ENOSPC) { - err = ErrNoSpace - } - return err -} - -func (b *Blobovnicza) DropMoveInfo(ctx context.Context, address oid.Address) error { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.DropMoveInfo", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", address.EncodeToString()), - )) - defer span.End() - - key := addressKey(address) - - err := b.boltDB.Update(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(incompletedMoveBucketName) - if bucket == nil { - return nil - } - - if err := bucket.Delete(key); err != nil { - return fmt.Errorf("(%T) failed to drop move info: %w", b, err) - } - - c := bucket.Cursor() - k, v := c.First() - bucketEmpty := k == nil && v == nil - if bucketEmpty { - return tx.DeleteBucket(incompletedMoveBucketName) - } - - return nil - }) - if errors.Is(err, syscall.ENOSPC) { - err = ErrNoSpace - } - return err -} - -func (b *Blobovnicza) ListMoveInfo(ctx context.Context) ([]MoveInfo, error) { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.ListMoveInfo", - trace.WithAttributes( - attribute.String("path", b.path), - )) - defer span.End() - - var result []MoveInfo - if err := b.boltDB.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(incompletedMoveBucketName) - if bucket == nil { - return nil - } - return bucket.ForEach(func(k, v []byte) error { - var addr oid.Address - storageID := make([]byte, len(v)) - if err := addressFromKey(&addr, k); err != nil { - return err - } - copy(storageID, v) - result = append(result, MoveInfo{ - Address: addr, - TargetStorageID: storageID, - }) - return nil - }) - }); err != nil { - return nil, err - } - - return result, nil -} diff --git a/pkg/local_object_storage/blobovnicza/put.go b/pkg/local_object_storage/blobovnicza/put.go deleted file mode 100644 index ff223ba36..000000000 --- a/pkg/local_object_storage/blobovnicza/put.go +++ /dev/null @@ -1,113 +0,0 @@ -package blobovnicza - -import ( - "context" - "errors" - "fmt" - "syscall" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - 
"go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - addr oid.Address - - objData []byte - - force bool -} - -// PutRes groups the resulting values of Put operation. -type PutRes struct{} - -// SetAddress sets the address of the saving object. -func (p *PutPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetMarshaledObject sets binary representation of the object. -func (p *PutPrm) SetMarshaledObject(data []byte) { - p.objData = data -} - -// SetForce sets force option. -func (p *PutPrm) SetForce(f bool) { - p.force = f -} - -// Put saves an object in Blobovnicza. -// -// If binary representation of the object is not set, -// it is calculated via Marshal method. -// -// The size of the object MUST BE less that or equal to -// the size specified in WithObjectSizeLimit option. -// -// Returns any error encountered that -// did not allow to completely save the object. -// -// Returns ErrFull if blobovnicza is filled. -// -// Should not be called in read-only configuration. -func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) { - _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Put", - trace.WithAttributes( - attribute.String("path", b.path), - attribute.String("address", prm.addr.EncodeToString()), - attribute.Int("size", len(prm.objData)), - )) - defer span.End() - - sz := uint64(len(prm.objData)) - bucketName := bucketForSize(sz) - key := addressKey(prm.addr) - recordSize := sz + uint64(len(key)) - - err := b.boltDB.Batch(func(tx *bbolt.Tx) error { - buck := tx.Bucket(bucketName) - if buck == nil { - // expected to happen: - // - before initialization step (incorrect usage by design) - // - if DB is corrupted (in future this case should be handled) - // - blobovnicza's object size changed before rebuild (handled if prm.force flag specified) - if !prm.force { - return logicerr.Wrap(fmt.Errorf("(%T) bucket for size %d not created", b, sz)) - } - var err error - buck, err = tx.CreateBucket(bucketName) - if err != nil { - return fmt.Errorf("(%T) failed to create bucket for size %d: %w", b, sz, err) - } - } - - // save the object in bucket - if err := buck.Put(key, prm.objData); err != nil { - return fmt.Errorf("(%T) could not save object in bucket: %w", b, err) - } - - return updateMeta(tx, func(count, size uint64) (uint64, uint64) { - return count + 1, size + recordSize - }) - }) - if err == nil { - b.itemAdded(recordSize) - } else if errors.Is(err, syscall.ENOSPC) { - err = ErrNoSpace - } - - return PutRes{}, err -} - -func addressKey(addr oid.Address) []byte { - return []byte(addr.EncodeToString()) -} - -func addressFromKey(dst *oid.Address, data []byte) error { - return dst.DecodeString(string(data)) -} diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go deleted file mode 100644 index 9bbed0db5..000000000 --- a/pkg/local_object_storage/blobovnicza/sizes.go +++ /dev/null @@ -1,63 +0,0 @@ -package blobovnicza - -import ( - "encoding/binary" - "fmt" - "math" - "math/bits" - "strconv" -) - -const firstBucketBound = uint64(32 * 1 << 10) // 32KB - -func stringifyBounds(lower, upper uint64) string { - return fmt.Sprintf("[%s:%s]", - stringifyByteSize(lower), - stringifyByteSize(upper), - ) -} - -func stringifyByteSize(sz uint64) string { - return strconv.FormatUint(sz, 10) -} - -func bucketKeyFromBounds(upperBound uint64) []byte { - buf := make([]byte, binary.MaxVarintLen64) - - ln := 
binary.PutUvarint(buf, upperBound) - - return buf[:ln] -} - -func bucketForSize(sz uint64) []byte { - return bucketKeyFromBounds(upperPowerOfTwo(sz)) -} - -func upperPowerOfTwo(v uint64) uint64 { - if v <= firstBucketBound { - return firstBucketBound - } - return 1 << bits.Len64(v-1) -} - -func (b *Blobovnicza) itemAdded(itemSize uint64) { - b.dataSize.Add(itemSize) - b.itemsCount.Add(1) - b.metrics.AddOpenBlobovniczaSize(itemSize) - b.metrics.AddOpenBlobovniczaItems(1) -} - -func (b *Blobovnicza) itemDeleted(itemSize uint64) { - b.dataSize.Add(^(itemSize - 1)) - b.itemsCount.Add(math.MaxUint64) - b.metrics.SubOpenBlobovniczaSize(itemSize) - b.metrics.SubOpenBlobovniczaItems(1) -} - -func (b *Blobovnicza) IsFull() bool { - return b.dataSize.Load() >= b.fullSizeLimit -} - -func (b *Blobovnicza) FillPercent() int { - return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit))) -} diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go deleted file mode 100644 index d582fc5e4..000000000 --- a/pkg/local_object_storage/blobovnicza/sizes_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package blobovnicza - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSizes(t *testing.T) { - for _, item := range []struct { - sz uint64 // object size - - upperBound uint64 // upper bound of expected range - }{ - { - sz: 0, - upperBound: firstBucketBound, - }, - { - sz: firstBucketBound, - upperBound: firstBucketBound, - }, - { - sz: firstBucketBound + 1, - upperBound: 2 * firstBucketBound, - }, - { - sz: 2 * firstBucketBound, - upperBound: 2 * firstBucketBound, - }, - { - sz: 2*firstBucketBound + 1, - upperBound: 4 * firstBucketBound, - }, - } { - key := bucketForSize(item.sz) - require.Equal(t, bucketKeyFromBounds(item.upperBound), key) - } -} - -func BenchmarkUpperBound(b *testing.B) { - for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} { - b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { - for range b.N { - _ = upperPowerOfTwo(size) - } - }) - } -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go deleted file mode 100644 index dbaa7387a..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go +++ /dev/null @@ -1,209 +0,0 @@ -package blobovniczatree - -import ( - "context" - "path/filepath" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" -) - -type activeDB struct { - blz *blobovnicza.Blobovnicza - shDB *sharedDB -} - -func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza { - return db.blz -} - -func (db *activeDB) Close(ctx context.Context) { - db.shDB.Close(ctx) -} - -func (db *activeDB) SystemPath() string { - return db.shDB.SystemPath() -} - -// activeDBManager manages active blobovnicza instances (that is, those that are being used for Put). -// -// Uses dbManager for opening/closing sharedDB instances. -// Stores a reference to an open active sharedDB, so dbManager does not close it. -// When changing the active sharedDB, releases the reference to the previous active sharedDB. 
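A sketch of the intended usage of the manager from the Put path, using the names defined in this file; lvlPath is an assumed level path, and returning errPutFailed on a nil result is illustrative rather than prescribed:

	aDB, err := m.GetOpenedActiveDBForLevel(ctx, lvlPath)
	if err != nil {
		return err
	}
	if aDB == nil {
		return errPutFailed // no active DB could be selected for this level
	}
	defer aDB.Close(ctx) // releases the reference held for this caller

	blz := aDB.Blobovnicza()
	// ... write the object via blz ...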
-type activeDBManager struct { - levelToActiveDBGuard *sync.RWMutex - levelToActiveDB map[string]*sharedDB - levelLock *utilSync.KeyLocker[string] - closed bool - - dbManager *dbManager - rootPath string -} - -func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager { - return &activeDBManager{ - levelToActiveDBGuard: &sync.RWMutex{}, - levelToActiveDB: make(map[string]*sharedDB), - levelLock: utilSync.NewKeyLocker[string](), - - dbManager: dbManager, - rootPath: rootPath, - } -} - -// GetOpenedActiveDBForLevel returns active DB for level. -// DB must be closed after use. -func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) { - activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath) - if err != nil { - return nil, err - } - if activeDB != nil { - return activeDB, nil - } - - return m.updateAndGetActive(ctx, lvlPath) -} - -func (m *activeDBManager) Open() { - m.levelToActiveDBGuard.Lock() - defer m.levelToActiveDBGuard.Unlock() - - m.closed = false -} - -func (m *activeDBManager) Close(ctx context.Context) { - m.levelToActiveDBGuard.Lock() - defer m.levelToActiveDBGuard.Unlock() - - for _, db := range m.levelToActiveDB { - db.Close(ctx) - } - m.levelToActiveDB = make(map[string]*sharedDB) - m.closed = true -} - -func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) { - m.levelToActiveDBGuard.RLock() - defer m.levelToActiveDBGuard.RUnlock() - - if m.closed { - return nil, errClosed - } - - db, ok := m.levelToActiveDB[lvlPath] - if !ok { - return nil, nil - } - - blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close() - if err != nil { - return nil, err - } - - if blz.IsFull() { - db.Close(ctx) - return nil, nil - } - - return &activeDB{ - blz: blz, - shDB: db, - }, nil -} - -func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) { - m.levelLock.Lock(lvlPath) - defer m.levelLock.Unlock(lvlPath) - - current, err := m.getCurrentActiveIfOk(ctx, lvlPath) - if err != nil { - return nil, err - } - if current != nil { - return current, nil - } - - nextShDB, err := m.getNextSharedDB(ctx, lvlPath) - if err != nil { - return nil, err - } - - if nextShDB == nil { - return nil, nil - } - - blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage - if err != nil { - return nil, err - } - return &activeDB{ - blz: blz, - shDB: nextShDB, - }, nil -} - -func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) { - var nextActiveDBIdx uint64 - hasActive, currentIdx := m.hasActiveDB(lvlPath) - if hasActive { - nextActiveDBIdx = currentIdx + 1 - } else { - hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(m.rootPath, lvlPath)) - if err != nil { - return nil, err - } - if hasDBs { - nextActiveDBIdx = maxIdx - } - } - - path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx)) - next := m.dbManager.GetByPath(path) - _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close() - if err != nil { - return nil, err - } - - previous, updated := m.replace(lvlPath, next) - if !updated && next != nil { - next.Close(ctx) // manager is closed, so don't hold active DB open - } - if updated && previous != nil { - previous.Close(ctx) - } - return next, nil -} - -func (m *activeDBManager) hasActiveDB(lvlPath string) (bool, uint64) { - m.levelToActiveDBGuard.RLock() - 
defer m.levelToActiveDBGuard.RUnlock() - - if m.closed { - return false, 0 - } - - db, ok := m.levelToActiveDB[lvlPath] - if !ok { - return false, 0 - } - return true, u64FromHexString(filepath.Base(db.SystemPath())) -} - -func (m *activeDBManager) replace(lvlPath string, shDB *sharedDB) (*sharedDB, bool) { - m.levelToActiveDBGuard.Lock() - defer m.levelToActiveDBGuard.Unlock() - - if m.closed { - return nil, false - } - - previous := m.levelToActiveDB[lvlPath] - if shDB == nil { - delete(m.levelToActiveDB, lvlPath) - } else { - m.levelToActiveDB[lvlPath] = shDB - } - return previous, true -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go deleted file mode 100644 index 3e8b9f07b..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ /dev/null @@ -1,176 +0,0 @@ -package blobovniczatree - -import ( - "context" - "errors" - "os" - "strconv" - "strings" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/hrw" -) - -// Blobovniczas represents the storage of the "small" objects. -// -// Each object is stored in Blobovnicza's (B-s). -// B-s are structured in a multilevel directory hierarchy -// with fixed depth and width (configured by BlobStor). -// -// Example (width = 4, depth = 3): -// -// x===============================x -// |[0] [1] [2] [3]| -// | \ / | -// | \ / | -// | \ / | -// | \ / | -// |[0] [1] [2] [3]| -// | | / | -// | | / | -// | | / | -// | | / | -// |[0](F) [1](A) [X] [X]| -// x===============================x -// -// Elements of the deepest level are B-s. -// B-s are allocated dynamically. At each moment of time there is -// an active B (ex. A), a set of already filled B-s (ex. F) and -// a list of not yet initialized B-s (ex. X). After the active B is filled, -// it becomes full, and the next B becomes initialized and active. -// -// Active B and some of the full B-s are cached (LRU). All cached -// B-s are initialized and opened. -// -// Object is saved as follows: -// 1. at each level, the next element is selected according to HRW, and -// we descend into it until the deepest level is reached; -// 2. at the B-s level the object is saved to the active B. If the active B -// is full, the next B is opened, initialized and cached. If there -// are no more X candidates, goto 1 and process the next level. -// -// After the object is saved in B, the path concatenation is returned -// in system path format as B identifier (ex. "0/1/1" or "3/2/1"). -type Blobovniczas struct { - cfg - - commondbManager *dbManager - activeDBManager *activeDBManager - dbCache *dbCache - deleteProtectedObjects *addressMap - dbFilesGuard *sync.RWMutex - rebuildGuard *sync.RWMutex -} - -var _ common.Storage = (*Blobovniczas)(nil) - -var errPutFailed = errors.New("could not save the object in any blobovnicza") - -const ( - dbExtension = ".db" -) - -// NewBlobovniczaTree returns a new instance of the blobovniczas tree.
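A minimal construction sketch matching the scheme above; the option values and the root path are illustrative (the options themselves appear in the tests later in this diff):

	st := NewBlobovniczaTree(
		ctx,
		WithObjectSizeLimit(64*1024),
		WithBlobovniczaShallowWidth(4),
		WithBlobovniczaShallowDepth(3),
		WithRootPath("/path/to/blobovnicza/root"),
	)
	if err := st.Open(mode.ComponentReadWrite); err != nil {
		return err
	}
	if err := st.Init(); err != nil {
		return err
	}
	defer func() { _ = st.Close(ctx) }()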
-func NewBlobovniczaTree(ctx context.Context, opts ...Option) (blz *Blobovniczas) { - blz = new(Blobovniczas) - initConfig(&blz.cfg) - - for i := range opts { - opts[i](&blz.cfg) - } - - blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.readOnly, blz.metrics.Blobovnicza(), blz.log) - blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.rootPath) - blz.dbCache = newDBCache(ctx, blz.openedCacheSize, - blz.openedCacheTTL, blz.openedCacheExpInterval, blz.commondbManager) - blz.deleteProtectedObjects = newAddressMap() - blz.dbFilesGuard = &sync.RWMutex{} - blz.rebuildGuard = &sync.RWMutex{} - - return blz -} - -// returns the hash of the object address. -func addressHash(addr *oid.Address, path string) uint64 { - var a string - - if addr != nil { - a = addr.EncodeToString() - } - - return hrw.StringHash(a + path) -} - -func u64ToHexString(ind uint64) string { - return strconv.FormatUint(ind, 16) -} - -func u64ToHexStringExt(ind uint64) string { - return strconv.FormatUint(ind, 16) + dbExtension -} - -func u64FromHexString(str string) uint64 { - v, err := strconv.ParseUint(strings.TrimSuffix(str, dbExtension), 16, 64) - if err != nil { - panic("blobovnicza name is not an index " + str) - } - - return v -} - -func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) { - entries, err := os.ReadDir(directory) - if os.IsNotExist(err) { // non-initialized tree - return false, 0, nil - } - if err != nil { - return false, 0, err - } - if len(entries) == 0 { - return false, 0, nil - } - var hasDBs bool - var maxIdx uint64 - for _, e := range entries { - if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) { - continue - } - hasDBs = true - maxIdx = max(u64FromHexString(e.Name()), maxIdx) - } - return hasDBs, maxIdx, nil -} - -// Type is the blobovniczatree storage type used in logs and configuration. -const Type = "blobovnicza" - -// Type implements common.Storage. -func (b *Blobovniczas) Type() string { - return Type -} - -// Path implements common.Storage. -func (b *Blobovniczas) Path() string { - return b.rootPath -} - -// SetCompressor implements common.Storage. -func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { - b.compression = cc -} - -func (b *Blobovniczas) Compressor() *compression.Compressor { - return b.compression -} - -// SetReportErrorFunc implements common.Storage. -func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) { - b.reportError = f -} - -func (b *Blobovniczas) SetParentID(parentID string) { - b.metrics.SetParentID(parentID) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go deleted file mode 100644 index 04ff5120c..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go +++ /dev/null @@ -1,157 +0,0 @@ -package blobovniczatree - -import ( - "context" - "sync" - "time" - - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" - cache "github.com/go-pkgz/expirable-cache/v3" -) - -// dbCache caches sharedDB instances that are NOT open for Put. -// -// Uses dbManager for opening/closing sharedDB instances. -// Stores a reference to a cached sharedDB, so dbManager does not close it.
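A sketch of the reference-counting contract implied above; path is an assumed blobovnicza path, and the caller names are hypothetical:

	shDB := c.GetOrCreate(ctx, path)
	blz, err := shDB.Open(ctx) // takes an extra reference on the shared DB
	if err != nil {
		return err
	}
	defer shDB.Close(ctx) // drops this caller's reference; the cache keeps its own

	_ = blz // read via blz while the reference is held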
-type dbCache struct { - cacheGuard *sync.Mutex - cache cache.Cache[string, *sharedDB] - pathLock *utilSync.KeyLocker[string] // the order of locks is important: pathLock first, cacheGuard second - closed bool - nonCached map[string]struct{} - wg sync.WaitGroup - cancel context.CancelFunc - - dbManager *dbManager -} - -func newDBCache(parentCtx context.Context, size int, - ttl time.Duration, expInterval time.Duration, - dbManager *dbManager, -) *dbCache { - ch := cache.NewCache[string, *sharedDB](). - WithTTL(ttl).WithLRU().WithMaxKeys(size). - WithOnEvicted(func(_ string, db *sharedDB) { - db.Close(parentCtx) - }) - ctx, cancel := context.WithCancel(parentCtx) - res := &dbCache{ - cacheGuard: &sync.Mutex{}, - wg: sync.WaitGroup{}, - cancel: cancel, - cache: ch, - dbManager: dbManager, - pathLock: utilSync.NewKeyLocker[string](), - nonCached: make(map[string]struct{}), - } - if ttl > 0 { - res.wg.Add(1) - go func() { - ticker := time.NewTicker(expInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - res.wg.Done() - return - case <-ticker.C: - res.cacheGuard.Lock() - res.cache.DeleteExpired() - res.cacheGuard.Unlock() - } - } - }() - } - return res -} - -func (c *dbCache) Open() { - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - - c.closed = false -} - -func (c *dbCache) Close() { - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - c.cancel() - c.wg.Wait() - c.cache.Purge() - c.closed = true -} - -func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB { - value := c.getExisted(path) - if value != nil { - return value - } - return c.create(ctx, path) -} - -func (c *dbCache) EvictAndMarkNonCached(path string) { - c.pathLock.Lock(path) - defer c.pathLock.Unlock(path) - - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - - c.cache.Remove(path) - c.nonCached[path] = struct{}{} -} - -func (c *dbCache) RemoveFromNonCached(path string) { - c.pathLock.Lock(path) - defer c.pathLock.Unlock(path) - - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - - delete(c.nonCached, path) -} - -func (c *dbCache) getExisted(path string) *sharedDB { - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - - if value, ok := c.cache.Get(path); ok { - return value - } else if value != nil { - c.cache.Invalidate(path) - } - return nil -} - -func (c *dbCache) create(ctx context.Context, path string) *sharedDB { - c.pathLock.Lock(path) - defer c.pathLock.Unlock(path) - - value := c.getExisted(path) - if value != nil { - return value - } - - value = c.dbManager.GetByPath(path) - - _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed - if err != nil { - return value - } - if added := c.put(path, value); !added { - value.Close(ctx) - } - return value -} - -func (c *dbCache) put(path string, db *sharedDB) bool { - c.cacheGuard.Lock() - defer c.cacheGuard.Unlock() - - _, isNonCached := c.nonCached[path] - - if isNonCached || c.closed { - return false - } - c.cache.Add(path, db) - return true -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go deleted file mode 100644 index f87f4a144..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package blobovniczatree - -import ( - "context" - "sync" - "sync/atomic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/stretchr/testify/require" -) - -func TestBlobovniczaTree_Concurrency(t *testing.T) { - t.Parallel() - const n = 1000 - - st := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(1024), - WithBlobovniczaShallowWidth(10), - WithBlobovniczaShallowDepth(1), - WithRootPath(t.TempDir())) - require.NoError(t, st.Open(mode.ComponentReadWrite)) - require.NoError(t, st.Init()) - defer func() { - require.NoError(t, st.Close(context.Background())) - }() - - objGen := &testutil.SeqObjGenerator{ObjSize: 1} - - var cnt atomic.Int64 - var wg sync.WaitGroup - for range 1000 { - wg.Add(1) - go func() { - defer wg.Done() - for cnt.Add(1) <= n { - obj := objGen.Next() - addr := testutil.AddressFromObject(t, obj) - - raw, err := obj.Marshal() - require.NoError(t, err) - - _, err = st.Put(context.Background(), common.PutPrm{ - Address: addr, - RawData: raw, - }) - require.NoError(t, err) - - _, err = st.Get(context.Background(), common.GetPrm{Address: addr}) - require.NoError(t, err) - } - }() - } - - wg.Wait() -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go deleted file mode 100644 index a6c1ce368..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ /dev/null @@ -1,99 +0,0 @@ -package blobovniczatree - -import ( - "context" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// Open opens blobovnicza tree. -func (b *Blobovniczas) Open(mode mode.ComponentMode) error { - b.readOnly = mode.ReadOnly() - b.metrics.SetMode(mode) - b.metrics.SetRebuildStatus(rebuildStatusNotStarted) - b.openManagers() - return nil -} - -// Init initializes blobovnicza tree. -// -// Should be called exactly once. 
-func (b *Blobovniczas) Init() error { - b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas) - - if b.readOnly { - b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) - return nil - } - - return b.initializeDBs(context.TODO()) -} - -func (b *Blobovniczas) initializeDBs(ctx context.Context) error { - err := util.MkdirAllX(b.rootPath, b.perm) - if err != nil { - return err - } - - eg, egCtx := errgroup.WithContext(ctx) - if b.blzInitWorkerCount > 0 { - eg.SetLimit(b.blzInitWorkerCount + 1) - } - eg.Go(func() error { - return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { - eg.Go(func() error { - p = strings.TrimSuffix(p, rebuildSuffix) - shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open(egCtx) - if err != nil { - return err - } - defer shBlz.Close(egCtx) - - moveInfo, err := blz.ListMoveInfo(egCtx) - if err != nil { - return err - } - for _, move := range moveInfo { - b.deleteProtectedObjects.Add(move.Address) - } - - b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) - return nil - }) - return false, nil - }) - }) - return eg.Wait() -} - -func (b *Blobovniczas) openManagers() { - b.commondbManager.Open() // order important - b.activeDBManager.Open() - b.dbCache.Open() -} - -// Close implements common.Storage. -func (b *Blobovniczas) Close(ctx context.Context) error { - b.dbCache.Close() // order important - b.activeDBManager.Close(ctx) - b.commondbManager.Close() - - return nil -} - -// returns blobovnicza with path p -// -// If blobovnicza is already cached, instance from cache is returned w/o changes. -func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB { - return b.dbCache.GetOrCreate(ctx, p) -} - -func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB { - return b.commondbManager.GetByPath(p) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go deleted file mode 100644 index 7db1891f9..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package blobovniczatree - -import ( - "context" - "os" - "path" - "strconv" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { - t.Parallel() - - rootDir := t.TempDir() - - blz := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaShallowDepth(3), - WithBlobovniczaShallowWidth(5), - WithRootPath(rootDir), - ) - - require.NoError(t, blz.Open(mode.ComponentReadWrite)) - require.NoError(t, blz.Init()) - - obj35 := blobstortest.NewObject(10 * 1024) - addr35 := objectCore.AddressOf(obj35) - raw, err := obj35.Marshal() - require.NoError(t, err) - - pRes35, err := blz.Put(context.Background(), common.PutPrm{ - Address: addr35, - Object: obj35, - RawData: raw, - }) - require.NoError(t, err) - - gRes, err := blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - StorageID: pRes35.StorageID, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, 
gRes.Object) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, gRes.Object) - - require.NoError(t, blz.Close(context.Background())) - - // change depth and width - blz = NewBlobovniczaTree( - context.Background(), - WithBlobovniczaShallowDepth(5), - WithBlobovniczaShallowWidth(2), - WithRootPath(rootDir), - ) - - require.NoError(t, blz.Open(mode.ComponentReadWrite)) - require.NoError(t, blz.Init()) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - StorageID: pRes35.StorageID, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, gRes.Object) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, gRes.Object) - - obj52 := blobstortest.NewObject(10 * 1024) - addr52 := objectCore.AddressOf(obj52) - raw, err = obj52.Marshal() - require.NoError(t, err) - - pRes52, err := blz.Put(context.Background(), common.PutPrm{ - Address: addr52, - Object: obj52, - RawData: raw, - }) - require.NoError(t, err) - - require.NoError(t, blz.Close(context.Background())) - - // change depth and width back - blz = NewBlobovniczaTree( - context.Background(), - WithBlobovniczaShallowDepth(3), - WithBlobovniczaShallowWidth(5), - WithRootPath(rootDir), - ) - require.NoError(t, blz.Open(mode.ComponentReadWrite)) - require.NoError(t, blz.Init()) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - StorageID: pRes35.StorageID, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, gRes.Object) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr35, - }) - require.NoError(t, err) - require.EqualValues(t, obj35, gRes.Object) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr52, - StorageID: pRes52.StorageID, - }) - require.NoError(t, err) - require.EqualValues(t, obj52, gRes.Object) - - gRes, err = blz.Get(context.Background(), common.GetPrm{ - Address: addr52, - }) - require.NoError(t, err) - require.EqualValues(t, obj52, gRes.Object) - - require.NoError(t, blz.Close(context.Background())) -} - -func TestInitBlobovniczasInitErrorType(t *testing.T) { - t.Parallel() - - rootDir := t.TempDir() - - for idx := 0; idx < 10; idx++ { - f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db")) - require.NoError(t, err) - _, err = f.Write([]byte("invalid db")) - require.NoError(t, err) - require.NoError(t, f.Close()) - - f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix)) - require.NoError(t, err) - require.NoError(t, f.Close()) - } - - blz := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaShallowDepth(1), - WithBlobovniczaShallowWidth(1), - WithRootPath(rootDir), - ) - - require.NoError(t, blz.Open(mode.ComponentReadWrite)) - err := blz.Init() - require.Contains(t, err.Error(), "open blobovnicza") - require.Contains(t, err.Error(), "invalid database") - require.NoError(t, blz.Close(context.Background())) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go deleted file mode 100644 index b83849c77..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go +++ /dev/null @@ -1,38 +0,0 @@ -package blobovniczatree - -import ( - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" -) - -func (b *Blobovniczas) ObjectsCount(ctx 
context.Context) (uint64, error) { - var ( - success bool - startedAt = time.Now() - ) - defer func() { - b.metrics.ObjectsCount(time.Since(startedAt), success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount") - defer span.End() - - var result uint64 - err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) { - shDB := b.getBlobovniczaWithoutCaching(p) - blz, err := shDB.Open(ctx) - if err != nil { - return true, err - } - defer shDB.Close(ctx) - - result += blz.ObjectsCount() - return false, nil - }) - if err != nil { - return 0, err - } - return result, nil -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go deleted file mode 100644 index d096791c3..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ /dev/null @@ -1,127 +0,0 @@ -package blobovniczatree - -import ( - "context" - "encoding/hex" - "errors" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var ( - errObjectIsDeleteProtected = errors.New("object is delete protected") - deleteRes = common.DeleteRes{} -) - -// Delete deletes an object from the blobovnicza tree. -// -// If the blobovnicza ID is specified, only this blobovnicza is processed. -// Otherwise, all Blobovniczas are processed in descending weight order.
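A caller-side sketch of the Delete entry point above; addr and storageID are hypothetical, and the common.DeletePrm field set is taken from its usage in the body that follows:

	if _, err := b.Delete(ctx, common.DeletePrm{
		Address:   addr,
		StorageID: storageID, // nil: search all blobovniczas in descending weight order
	}); err != nil {
		return err
	}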
-func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) { - var ( - success = false - startedAt = time.Now() - ) - defer func() { - b.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete", - trace.WithAttributes( - attribute.String("path", b.rootPath), - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - )) - defer span.End() - - if b.readOnly { - return deleteRes, common.ErrReadOnly - } - - if b.rebuildGuard.TryRLock() { - defer b.rebuildGuard.RUnlock() - } else { - return deleteRes, errRebuildInProgress - } - - if b.deleteProtectedObjects.Contains(prm.Address) { - return deleteRes, errObjectIsDeleteProtected - } - - var bPrm blobovnicza.DeletePrm - bPrm.SetAddress(prm.Address) - - if prm.StorageID != nil { - id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(ctx, id.Path()) - blz, err := shBlz.Open(ctx) - if err != nil { - return res, err - } - defer shBlz.Close(ctx) - - if res, err = b.deleteObject(ctx, blz, bPrm); err == nil { - success = true - } - return res, err - } - - objectFound := false - - err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) { - res, err = b.deleteObjectFromLevel(ctx, bPrm, p) - if err != nil { - if !client.IsErrObjectNotFound(err) { - b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, - zap.String("level", p), - zap.Error(err), - ) - } - } - - if err == nil { - objectFound = true - } - - // abort iterator if found, otherwise process all Blobovniczas - return err == nil, nil - }) - - if err == nil && !objectFound { - // not found in any blobovnicza - return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - success = err == nil - - return -} - -// tries to delete object from particular blobovnicza. -// -// returns no error if object was removed from some blobovnicza of the same level. -func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) { - shBlz := b.getBlobovnicza(ctx, blzPath) - blz, err := shBlz.Open(ctx) - if err != nil { - return deleteRes, err - } - defer shBlz.Close(ctx) - - return b.deleteObject(ctx, blz, prm) -} - -// removes object from blobovnicza and returns common.DeleteRes. 
-func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) { - _, err := blz.Delete(ctx, prm) - return deleteRes, err -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/errors.go b/pkg/local_object_storage/blobstor/blobovniczatree/errors.go deleted file mode 100644 index be0fd81c3..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package blobovniczatree - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -var errClosed = logicerr.New("blobovnicza is closed") - -func isErrOutOfRange(err error) bool { - var target *apistatus.ObjectOutOfRange - return errors.As(err, &target) -} - -func isLogical(err error) bool { - return errors.As(err, new(logicerr.Logical)) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go deleted file mode 100644 index 0c5e48821..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ /dev/null @@ -1,69 +0,0 @@ -package blobovniczatree - -import ( - "context" - "encoding/hex" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Exists implements common.Storage.
-func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { - var ( - startedAt = time.Now() - success = false - found = false - ) - defer func() { - b.metrics.Exists(time.Since(startedAt), success, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Exists", - trace.WithAttributes( - attribute.String("path", b.rootPath), - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - )) - defer span.End() - - if prm.StorageID != nil { - id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(ctx, id.Path()) - blz, err := shBlz.Open(ctx) - if err != nil { - return common.ExistsRes{}, err - } - defer shBlz.Close(ctx) - - exists, err := blz.Exists(ctx, prm.Address) - return common.ExistsRes{Exists: exists}, err - } - - var gPrm blobovnicza.GetPrm - gPrm.SetAddress(prm.Address) - - err := b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) { - _, err := b.getObjectFromLevel(ctx, gPrm, p) - if err != nil { - if !client.IsErrObjectNotFound(err) { - b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, - zap.String("level", p), - zap.Error(err)) - } - } - - found = err == nil - return found, nil - }) - - success = err == nil - return common.ExistsRes{Exists: found}, err -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go deleted file mode 100644 index df2b4ffe5..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package blobovniczatree - -import ( - "bytes" - "context" - "os" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/stretchr/testify/require" -) - -func TestExistsInvalidStorageID(t *testing.T) { - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(1024), - WithBlobovniczaShallowWidth(2), - WithBlobovniczaShallowDepth(2), - WithRootPath(dir), - WithBlobovniczaSize(1<<20)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - defer func() { require.NoError(t, b.Close(context.Background())) }() - - obj := blobstortest.NewObject(1024) - addr := object.AddressOf(obj) - d, err := obj.Marshal() - require.NoError(t, err) - - putRes, err := b.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true}) - require.NoError(t, err) - - t.Run("valid but wrong storage id", func(t *testing.T) { - // "0/X/Y" <-> "1/X/Y" - storageID := bytes.Clone(putRes.StorageID) - if storageID[0] == '0' { - storageID[0]++ - } else { - storageID[0]-- - } - - res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID}) - require.NoError(t, err) - require.False(t, res.Exists) - }) - - t.Run("valid id but corrupted file", func(t *testing.T) { - relBadFileDir := filepath.Join("9", "0") - badFileName := "0" - - // An invalid boltdb file is created so that it returns 
an error when opened - require.NoError(t, os.MkdirAll(filepath.Join(dir, relBadFileDir), os.ModePerm)) - require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName+".db"), []byte("not a boltdb file content"), 0o777)) - - res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: []byte(filepath.Join(relBadFileDir, badFileName))}) - require.Error(t, err) - require.False(t, res.Exists) - }) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go deleted file mode 100644 index 9244d765c..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package blobovniczatree - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" -) - -func TestGeneric(t *testing.T) { - const maxObjectSize = 1 << 16 - - helper := func(t *testing.T, dir string) common.Storage { - return NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(maxObjectSize), - WithBlobovniczaShallowWidth(2), - WithBlobovniczaShallowDepth(2), - WithRootPath(dir), - WithBlobovniczaSize(1<<20)) - } - - newTree := func(t *testing.T) common.Storage { - return helper(t, t.TempDir()) - } - - blobstortest.TestAll(t, newTree, 1024, maxObjectSize) - - t.Run("info", func(t *testing.T) { - dir := t.TempDir() - blobstortest.TestInfo(t, func(t *testing.T) common.Storage { - return helper(t, dir) - }, Type, dir) - }) -} - -func TestControl(t *testing.T) { - const maxObjectSize = 2048 - - newTree := func(t *testing.T) common.Storage { - return NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(maxObjectSize), - WithBlobovniczaShallowWidth(2), - WithBlobovniczaShallowDepth(2), - WithRootPath(t.TempDir()), - WithBlobovniczaSize(1<<20)) - } - - blobstortest.TestControl(t, newTree, 1024, maxObjectSize) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go deleted file mode 100644 index e5c83e5f2..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ /dev/null @@ -1,126 +0,0 @@ -package blobovniczatree - -import ( - "context" - "encoding/hex" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Get reads an object from the blobovnicza tree. -// -// If the blobovnicza ID is specified, only this blobovnicza is processed. -// Otherwise, all Blobovniczas are processed in descending weight order.
-func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) {
-	var (
-		startedAt = time.Now()
-		success   = false
-		size      = 0
-	)
-	defer func() {
-		b.metrics.Get(time.Since(startedAt), size, success, prm.StorageID != nil)
-	}()
-
-	ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Get",
-		trace.WithAttributes(
-			attribute.String("path", b.rootPath),
-			attribute.String("address", prm.Address.EncodeToString()),
-			attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
-			attribute.Bool("raw", prm.Raw),
-		))
-	defer span.End()
-
-	var bPrm blobovnicza.GetPrm
-	bPrm.SetAddress(prm.Address)
-
-	if prm.StorageID != nil {
-		id := NewIDFromBytes(prm.StorageID)
-		shBlz := b.getBlobovnicza(ctx, id.Path())
-		blz, err := shBlz.Open(ctx)
-		if err != nil {
-			return res, err
-		}
-		defer shBlz.Close(ctx)
-
-		res, err = b.getObject(ctx, blz, bPrm)
-		if err == nil {
-			success = true
-			size = len(res.RawData)
-		}
-		return res, err
-	}
-
-	err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
-		res, err = b.getObjectFromLevel(ctx, bPrm, p)
-		if err != nil {
-			if !client.IsErrObjectNotFound(err) {
-				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
-					zap.String("level", p),
-					zap.Error(err),
-				)
-			}
-		}
-
-		// abort iterator if found, otherwise process all Blobovniczas
-		return err == nil, nil
-	})
-
-	if err == nil && res.Object == nil {
-		// not found in any blobovnicza
-		return res, logicerr.Wrap(new(apistatus.ObjectNotFound))
-	}
-
-	success = true
-	size = len(res.RawData)
-
-	return
-}
-
-// tries to read object from particular blobovnicza.
-//
-// returns an error if the object could not be read from the blobovnicza at the given path.
-func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
-	// open blobovnicza (cached inside)
-	shBlz := b.getBlobovnicza(ctx, blzPath)
-	blz, err := shBlz.Open(ctx)
-	if err != nil {
-		return common.GetRes{}, err
-	}
-	defer shBlz.Close(ctx)
-
-	return b.getObject(ctx, blz, prm)
-}
-
-// reads object from blobovnicza and returns GetRes.
-func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.GetPrm) (common.GetRes, error) {
-	res, err := blz.Get(ctx, prm)
-	if err != nil {
-		return common.GetRes{}, err
-	}
-
-	// decompress the data
-	data, err := b.compression.Decompress(res.Object())
-	if err != nil {
-		return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
-	}
-
-	// unmarshal the object
-	obj := objectSDK.New()
-	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
-	}
-
-	return common.GetRes{Object: obj, RawData: data}, nil
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
deleted file mode 100644
index 27d13f4f3..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package blobovniczatree
-
-import (
-	"context"
-	"encoding/hex"
-	"fmt"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-	"go.uber.org/zap"
-)
-
-// GetRange reads range of object payload data from blobovnicza tree.
-//
-// If blobovnicza ID is specified, only this blobovnicza is processed.
-// Otherwise, all Blobovniczas are processed in descending weight order.
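-//
-// An illustrative range read (an editor's sketch, not part of the original
-// file; assumes addr and a prepared payload range rng):
-//
-//	var prm common.GetRangePrm
-//	prm.Address = addr
-//	prm.Range = rng
-//	res, err := b.GetRange(ctx, prm)
-//	// On success res.Data holds payload[offset : offset+length];
-//	// a range outside the payload yields apistatus.ObjectOutOfRange.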
-func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) {
-	var (
-		startedAt = time.Now()
-		success   = false
-		size      = 0
-	)
-	defer func() {
-		b.metrics.GetRange(time.Since(startedAt), size, success, prm.StorageID != nil)
-	}()
-
-	ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.GetRange",
-		trace.WithAttributes(
-			attribute.String("path", b.rootPath),
-			attribute.String("address", prm.Address.EncodeToString()),
-			attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
-			attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
-			attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
-		))
-	defer span.End()
-
-	if prm.StorageID != nil {
-		id := NewIDFromBytes(prm.StorageID)
-		shBlz := b.getBlobovnicza(ctx, id.Path())
-		blz, err := shBlz.Open(ctx)
-		if err != nil {
-			return common.GetRangeRes{}, err
-		}
-		defer shBlz.Close(ctx)
-
-		res, err := b.getObjectRange(ctx, blz, prm)
-		if err == nil {
-			size = len(res.Data)
-			success = true
-		}
-		return res, err
-	}
-
-	objectFound := false
-
-	err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
-		res, err = b.getRangeFromLevel(ctx, prm, p)
-		if err != nil {
-			outOfBounds := isErrOutOfRange(err)
-			if !outOfBounds && !client.IsErrObjectNotFound(err) {
-				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
-					zap.String("level", p),
-					zap.Error(err))
-			}
-			if outOfBounds {
-				return true, err
-			}
-		}
-
-		objectFound = err == nil
-
-		// abort iterator if found, otherwise process all Blobovniczas
-		return err == nil, nil
-	})
-
-	if err == nil && !objectFound {
-		// not found in any blobovnicza
-		return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
-	}
-
-	if err == nil {
-		success = true
-		size = len(res.Data)
-	}
-
-	return
-}
-
-// tries to read range of object payload data from particular blobovnicza.
-//
-// returns an error if the range could not be read from the blobovnicza at the given path.
-func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
-	// open blobovnicza (cached inside)
-	shBlz := b.getBlobovnicza(ctx, blzPath)
-	blz, err := shBlz.Open(ctx)
-	if err != nil {
-		return common.GetRangeRes{}, err
-	}
-	defer shBlz.Close(ctx)
-
-	return b.getObjectRange(ctx, blz, prm)
-}
-
-// reads range of object payload data from blobovnicza and returns GetRangeRes.
-func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blobovnicza, prm common.GetRangePrm) (common.GetRangeRes, error) {
-	var gPrm blobovnicza.GetPrm
-	gPrm.SetAddress(prm.Address)
-
-	// we don't use the GetRange call for now, since blobovnicza
-	// stores data that is compressed on the BlobStor side.
-	// If blobovnicza learns to do the compression itself,
-	// we can start using GetRange.
- res, err := blz.Get(ctx, gPrm) - if err != nil { - return common.GetRangeRes{}, err - } - - // decompress the data - data, err := b.compression.Decompress(res.Object()) - if err != nil { - return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err) - } - - // unmarshal the object - obj := objectSDK.New() - if err := obj.Unmarshal(data); err != nil { - return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err) - } - - from := prm.Range.GetOffset() - to := from + prm.Range.GetLength() - payload := obj.Payload() - - if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { - return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) - } - - return common.GetRangeRes{ - Data: payload[from:to], - }, nil -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/id.go b/pkg/local_object_storage/blobstor/blobovniczatree/id.go deleted file mode 100644 index a080819bc..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/id.go +++ /dev/null @@ -1,17 +0,0 @@ -package blobovniczatree - -// ID represents Blobovnicza identifier. -type ID []byte - -// NewIDFromBytes constructs an ID from a byte slice. -func NewIDFromBytes(v []byte) *ID { - return (*ID)(&v) -} - -func (id ID) Path() string { - return string(id) + dbExtension -} - -func (id ID) Bytes() []byte { - return id -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go deleted file mode 100644 index ceb8fb7e3..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ /dev/null @@ -1,320 +0,0 @@ -package blobovniczatree - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/hrw" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Iterate iterates over all objects in b. 
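-//
-// A sketch of typical handler wiring (an editor's illustration, not part of
-// the original file):
-//
-//	_, err := b.Iterate(ctx, common.IteratePrm{
-//		IgnoreErrors: true,
-//		Handler: func(elem common.IterationElement) error {
-//			// elem.Address, elem.ObjectData and elem.StorageID are
-//			// filled for every object stored in the tree.
-//			return nil
-//		},
-//	})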
-func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { - var ( - startedAt = time.Now() - err error - ) - defer func() { - b.metrics.Iterate(time.Since(startedAt), err == nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Iterate", - trace.WithAttributes( - attribute.String("path", b.rootPath), - attribute.Bool("ignore_errors", prm.IgnoreErrors), - )) - defer span.End() - - err = b.iterateBlobovniczas(ctx, prm.IgnoreErrors, func(p string, blz *blobovnicza.Blobovnicza) error { - var subPrm blobovnicza.IteratePrm - subPrm.SetHandler(func(elem blobovnicza.IterationElement) error { - data, err := b.compression.Decompress(elem.ObjectData()) - if err != nil { - if prm.IgnoreErrors { - b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.Stringer("address", elem.Address()), - zap.Error(err), - zap.String("storage_id", p), - zap.String("root_path", b.rootPath)) - return nil - } - return fmt.Errorf("decompress object data: %w", err) - } - - if prm.Handler != nil { - return prm.Handler(common.IterationElement{ - Address: elem.Address(), - ObjectData: data, - StorageID: []byte(strings.TrimSuffix(p, dbExtension)), - }) - } - return nil - }) - subPrm.DecodeAddresses() - - _, err := blz.Iterate(ctx, subPrm) - return err - }) - return common.IterateRes{}, err -} - -// iterator over all Blobovniczas in unsorted order. Break on f's error return. -func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error { - return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) { - shBlz := b.getBlobovnicza(ctx, p) - blz, err := shBlz.Open(ctx) - if err != nil { - if ignoreErrors { - b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.Error(err), - zap.String("storage_id", p), - zap.String("root_path", b.rootPath)) - return false, nil - } - return false, fmt.Errorf("open blobovnicza %s: %w", p, err) - } - defer shBlz.Close(ctx) - - err = f(p, blz) - - return err != nil, err - }) -} - -// iterateSortedLeaves iterates over the paths of Blobovniczas sorted by weight. -// -// Uses depth, width and leaf width for iteration. -func (b *Blobovniczas) iterateSortedLeaves(ctx context.Context, addr *oid.Address, f func(string) (bool, error)) error { - _, err := b.iterateSorted( - ctx, - addr, - make([]string, 0, b.blzShallowDepth), - b.blzShallowDepth, - func(p []string) (bool, error) { return f(filepath.Join(p...)) }, - ) - - return err -} - -// iterator over directories with Blobovniczas sorted by weight. -func (b *Blobovniczas) iterateDeepest(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error { - depth := b.blzShallowDepth - if depth > 0 { - depth-- - } - - _, err := b.iterateSorted( - ctx, - &addr, - make([]string, 0, depth), - depth, - func(p []string) (bool, error) { return f(filepath.Join(p...)) }, - ) - - return err -} - -// iterator over particular level of directories. 
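-//
-// For example, with shallow depth 2 and shallow width 2 the on-disk layout
-// has roughly this shape (an editor's sketch; leaf indices may grow beyond
-// the configured width):
-//
-//	<root>/0/0/0.db
-//	<root>/0/0/1.db
-//	<root>/0/1/0.db
-//	<root>/1/0/0.db
-//	...
-//
-// Inner directories are visited in HRW order for the given address; at the
-// leaf level the database files themselves are enumerated.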
-func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
-	isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
-	levelWidth := b.blzShallowWidth
-	if isLeafLevel {
-		hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{b.rootPath}, curPath...)...))
-		if err != nil {
-			return false, err
-		}
-		levelWidth = 0
-		if hasDBs {
-			levelWidth = maxIdx + 1
-		}
-	}
-	indices := indexSlice(levelWidth)
-
-	if !isLeafLevel {
-		hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
-	}
-
-	exec := uint64(len(curPath)) == execDepth
-
-	for i := range indices {
-		select {
-		case <-ctx.Done():
-			return false, ctx.Err()
-		default:
-		}
-
-		lastPart := u64ToHexString(indices[i])
-		if isLeafLevel {
-			lastPart = u64ToHexStringExt(indices[i])
-		}
-
-		if i == 0 {
-			curPath = append(curPath, lastPart)
-		} else {
-			curPath[len(curPath)-1] = lastPart
-		}
-
-		if exec {
-			if stop, err := f(curPath); err != nil {
-				return false, err
-			} else if stop {
-				return true, nil
-			}
-		} else if stop, err := b.iterateSorted(ctx, addr, curPath, execDepth, f); err != nil {
-			return false, err
-		} else if stop {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
-// iterateExistingDBPaths iterates over the paths of Blobovniczas in no particular order.
-//
-// Uses existing blobovnicza files for iteration.
-func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string) (bool, error)) error {
-	b.dbFilesGuard.RLock()
-	defer b.dbFilesGuard.RUnlock()
-
-	_, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
-	return err
-}
-
-func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
-	sysPath := filepath.Join(b.rootPath, path)
-	entries, err := os.ReadDir(sysPath)
-	if os.IsNotExist(err) && b.readOnly && path == "" { // non-initialized tree in read-only mode
-		return false, nil
-	}
-	if err != nil {
-		return false, err
-	}
-	for _, entry := range entries {
-		select {
-		case <-ctx.Done():
-			return false, ctx.Err()
-		default:
-		}
-		if entry.IsDir() {
-			stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
-			if err != nil {
-				return false, err
-			}
-			if stop {
-				return true, nil
-			}
-		} else {
-			if !fileFilter(entry.Name()) {
-				continue
-			}
-			stop, err := f(filepath.Join(path, entry.Name()))
-			if err != nil {
-				return false, err
-			}
-			if stop {
-				return true, nil
-			}
-		}
-	}
-	return false, nil
-}
-
-// iterateIncompletedRebuildDBPaths iterates over the paths of Blobovniczas with incomplete rebuild files in no particular order.
-func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error { - b.dbFilesGuard.RLock() - defer b.dbFilesGuard.RUnlock() - - _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) }) - return err -} - -func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error { - b.dbFilesGuard.RLock() - defer b.dbFilesGuard.RUnlock() - - _, err := b.iterateSordedDBPathsInternal(ctx, "", addr, f) - return err -} - -func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - - sysPath := filepath.Join(b.rootPath, path) - entries, err := os.ReadDir(sysPath) - if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode - return false, nil - } - if err != nil { - return false, err - } - var dbIdxs []uint64 - var dirIdxs []uint64 - - for _, entry := range entries { - if strings.HasSuffix(entry.Name(), rebuildSuffix) { - continue - } - idx := u64FromHexString(entry.Name()) - if entry.IsDir() { - dirIdxs = append(dirIdxs, idx) - } else { - dbIdxs = append(dbIdxs, idx) - } - } - - if len(dbIdxs) > 0 { - for _, dbIdx := range dbIdxs { - dbPath := filepath.Join(path, u64ToHexStringExt(dbIdx)) - stop, err := f(dbPath) - if err != nil { - return false, err - } - if stop { - return true, nil - } - } - } - - if len(dirIdxs) > 0 { - hrw.SortSliceByValue(dirIdxs, addressHash(&addr, path)) - for _, dirIdx := range dirIdxs { - dirPath := filepath.Join(path, u64ToHexString(dirIdx)) - stop, err := b.iterateSordedDBPathsInternal(ctx, dirPath, addr, f) - if err != nil { - return false, err - } - if stop { - return true, nil - } - } - } - - return false, nil -} - -// makes slice of uint64 values from 0 to number-1. -func indexSlice(number uint64) []uint64 { - s := make([]uint64, number) - - for i := range s { - s[i] = uint64(i) - } - - return s -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go deleted file mode 100644 index 6438f715b..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ /dev/null @@ -1,336 +0,0 @@ -package blobovniczatree - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -var errClosingClosedBlobovnicza = errors.New("closing closed blobovnicza is not allowed") - -// sharedDB is responsible for opening and closing a file of single blobovnicza. 
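-//
-// A sketch of the intended pairing (an editor's illustration): every
-// successful Open must be matched by exactly one Close, since the file is
-// shared by reference counting:
-//
-//	blz, err := shBlz.Open(ctx) // increments the reference count
-//	if err != nil {
-//		return err
-//	}
-//	defer shBlz.Close(ctx) // the last Close releases the underlying file
-//	_ = blz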
-type sharedDB struct { - cond *sync.Cond - blcza *blobovnicza.Blobovnicza - refCount uint32 - - openDBCounter *openDBCounter - closedFlag *atomic.Bool - options []blobovnicza.Option - path string - readOnly bool - metrics blobovnicza.Metrics - log *logger.Logger -} - -func newSharedDB(options []blobovnicza.Option, path string, readOnly bool, - metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *sharedDB { - return &sharedDB{ - cond: &sync.Cond{ - L: &sync.RWMutex{}, - }, - options: options, - path: path, - readOnly: readOnly, - metrics: metrics, - closedFlag: closedFlag, - log: log, - openDBCounter: openDBCounter, - } -} - -func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { - if b.closedFlag.Load() { - return nil, errClosed - } - - b.cond.L.Lock() - defer b.cond.L.Unlock() - - if b.refCount > 0 { - b.refCount++ - return b.blcza, nil - } - - blz := blobovnicza.New(append(b.options, - blobovnicza.WithReadOnly(b.readOnly), - blobovnicza.WithPath(b.path), - blobovnicza.WithMetrics(b.metrics), - )...) - - if err := blz.Open(ctx); err != nil { - return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err) - } - if err := blz.Init(ctx); err != nil { - return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err) - } - - b.refCount++ - b.blcza = blz - b.openDBCounter.Inc() - - return blz, nil -} - -func (b *sharedDB) Close(ctx context.Context) { - b.cond.L.Lock() - defer b.cond.L.Unlock() - - if b.refCount == 0 { - b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) - b.cond.Broadcast() - return - } - - if b.refCount == 1 { - b.refCount = 0 - if err := b.blcza.Close(ctx); err != nil { - b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, - zap.String("id", b.path), - zap.Error(err), - ) - } - b.blcza = nil - b.openDBCounter.Dec() - return - } - - b.refCount-- - if b.refCount == 1 { - b.cond.Broadcast() - } -} - -func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { - b.cond.L.Lock() - if b.refCount > 1 { - b.cond.Wait() - } - defer b.cond.L.Unlock() - - if b.refCount == 0 { - return errClosingClosedBlobovnicza - } - - if err := b.blcza.Close(ctx); err != nil { - b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, - zap.String("id", b.path), - zap.Error(err), - ) - return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err) - } - - b.refCount = 0 - b.blcza = nil - b.openDBCounter.Dec() - - return os.Remove(b.path) -} - -func (b *sharedDB) SystemPath() string { - return b.path -} - -// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
-type levelDBManager struct { - dbMtx *sync.RWMutex - databases map[uint64]*sharedDB - - options []blobovnicza.Option - path string - readOnly bool - metrics blobovnicza.Metrics - openDBCounter *openDBCounter - closedFlag *atomic.Bool - log *logger.Logger -} - -func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, - readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDBManager { - result := &levelDBManager{ - databases: make(map[uint64]*sharedDB), - dbMtx: &sync.RWMutex{}, - - options: options, - path: filepath.Join(rootPath, lvlPath), - readOnly: readOnly, - metrics: metrics, - openDBCounter: openDBCounter, - closedFlag: closedFlag, - log: log, - } - return result -} - -func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { - res := m.getDBIfExists(idx) - if res != nil { - return res - } - return m.getOrCreateDB(idx) -} - -func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { - m.dbMtx.RLock() - defer m.dbMtx.RUnlock() - - return m.databases[idx] -} - -func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { - m.dbMtx.Lock() - defer m.dbMtx.Unlock() - - db := m.databases[idx] - if db != nil { - return db - } - - db = newSharedDB(m.options, filepath.Join(m.path, u64ToHexStringExt(idx)), m.readOnly, m.metrics, m.openDBCounter, m.closedFlag, m.log) - m.databases[idx] = db - return db -} - -func (m *levelDBManager) hasAnyDB() bool { - m.dbMtx.RLock() - defer m.dbMtx.RUnlock() - - return len(m.databases) > 0 -} - -// dbManager manages the opening and closing of blobovnicza instances. -// -// The blobovnicza opens at the first request, closes after the last request. -type dbManager struct { - levelToManager map[string]*levelDBManager - levelToManagerGuard *sync.RWMutex - closedFlag *atomic.Bool - dbCounter *openDBCounter - - rootPath string - options []blobovnicza.Option - readOnly bool - metrics blobovnicza.Metrics - log *logger.Logger -} - -func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager { - return &dbManager{ - rootPath: rootPath, - options: options, - readOnly: readOnly, - metrics: metrics, - levelToManager: make(map[string]*levelDBManager), - levelToManagerGuard: &sync.RWMutex{}, - log: log, - closedFlag: &atomic.Bool{}, - dbCounter: newOpenDBCounter(), - } -} - -func (m *dbManager) GetByPath(path string) *sharedDB { - lvlPath := filepath.Dir(path) - curIndex := u64FromHexString(filepath.Base(path)) - levelManager := m.getLevelManager(lvlPath) - return levelManager.GetByIndex(curIndex) -} - -func (m *dbManager) CleanResources(path string) { - lvlPath := filepath.Dir(path) - - m.levelToManagerGuard.Lock() - defer m.levelToManagerGuard.Unlock() - - if result, ok := m.levelToManager[lvlPath]; ok && !result.hasAnyDB() { - delete(m.levelToManager, lvlPath) - } -} - -func (m *dbManager) Open() { - m.closedFlag.Store(false) -} - -func (m *dbManager) Close() { - m.closedFlag.Store(true) - m.dbCounter.WaitUntilAllClosed() -} - -func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { - result := m.getLevelManagerIfExists(lvlPath) - if result != nil { - return result - } - return m.getOrCreateLevelManager(lvlPath) -} - -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { - m.levelToManagerGuard.RLock() - defer m.levelToManagerGuard.RUnlock() - - return m.levelToManager[lvlPath] -} - -func (m *dbManager) getOrCreateLevelManager(lvlPath string) 
*levelDBManager { - m.levelToManagerGuard.Lock() - defer m.levelToManagerGuard.Unlock() - - if result, ok := m.levelToManager[lvlPath]; ok { - return result - } - - result := newLevelDBManager(m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log) - m.levelToManager[lvlPath] = result - return result -} - -type openDBCounter struct { - cond *sync.Cond - count uint64 -} - -func newOpenDBCounter() *openDBCounter { - return &openDBCounter{ - cond: &sync.Cond{ - L: &sync.Mutex{}, - }, - } -} - -func (c *openDBCounter) Inc() { - c.cond.L.Lock() - defer c.cond.L.Unlock() - - c.count++ -} - -func (c *openDBCounter) Dec() { - c.cond.L.Lock() - defer c.cond.L.Unlock() - - if c.count > 0 { - c.count-- - } - - if c.count == 0 { - c.cond.Broadcast() - } -} - -func (c *openDBCounter) WaitUntilAllClosed() { - c.cond.L.Lock() - for c.count > 0 { - c.cond.Wait() - } - c.cond.L.Unlock() -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go b/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go deleted file mode 100644 index 68dc7ff38..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go +++ /dev/null @@ -1,55 +0,0 @@ -package blobovniczatree - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -const ( - rebuildStatusNotStarted = "not_started" - rebuildStatusRunning = "running" - rebuildStatusCompleted = "completed" - rebuildStatusFailed = "failed" -) - -type Metrics interface { - Blobovnicza() blobovnicza.Metrics - - SetParentID(parentID string) - - SetMode(mode.ComponentMode) - Close() - - SetRebuildStatus(status string) - ObjectMoved(d time.Duration) - SetRebuildPercent(value uint32) - ObjectsCount(d time.Duration, success bool) - - Delete(d time.Duration, success, withStorageID bool) - Exists(d time.Duration, success, withStorageID bool) - GetRange(d time.Duration, size int, success, withStorageID bool) - Get(d time.Duration, size int, success, withStorageID bool) - Iterate(d time.Duration, success bool) - Put(d time.Duration, size int, success bool) -} - -type noopMetrics struct{} - -func (m *noopMetrics) SetParentID(string) {} -func (m *noopMetrics) SetMode(mode.ComponentMode) {} -func (m *noopMetrics) Close() {} -func (m *noopMetrics) SetRebuildStatus(string) {} -func (m *noopMetrics) SetRebuildPercent(uint32) {} -func (m *noopMetrics) ObjectMoved(time.Duration) {} -func (m *noopMetrics) Delete(time.Duration, bool, bool) {} -func (m *noopMetrics) Exists(time.Duration, bool, bool) {} -func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {} -func (m *noopMetrics) Get(time.Duration, int, bool, bool) {} -func (m *noopMetrics) Iterate(time.Duration, bool) {} -func (m *noopMetrics) Put(time.Duration, int, bool) {} -func (m *noopMetrics) ObjectsCount(time.Duration, bool) {} -func (m *noopMetrics) Blobovnicza() blobovnicza.Metrics { - return &blobovnicza.NoopMetrics{} -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go deleted file mode 100644 index 5f268b0f2..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ /dev/null @@ -1,160 +0,0 @@ -package blobovniczatree - -import ( - "context" - "io/fs" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -type cfg struct { - log *logger.Logger - perm fs.FileMode - readOnly bool - rootPath string - openedCacheSize int - blzShallowDepth uint64 - blzShallowWidth uint64 - compression *compression.Compressor - blzOpts []blobovnicza.Option - reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. - metrics Metrics - waitBeforeDropDB time.Duration - blzInitWorkerCount int - blzMoveBatchSize int - // TTL for blobovnicza's cache - openedCacheTTL time.Duration - // Interval for deletion expired blobovnicza's - openedCacheExpInterval time.Duration -} - -type Option func(*cfg) - -const ( - defaultPerm = 0o700 - defaultOpenedCacheSize = 50 - defaultOpenedCacheTTL = 0 // means expiring is off - defaultOpenedCacheInterval = 15 * time.Second - defaultBlzShallowDepth = 2 - defaultBlzShallowWidth = 16 - defaultWaitBeforeDropDB = 10 * time.Second - defaultBlzInitWorkerCount = 5 - defaulBlzMoveBatchSize = 10000 -) - -func initConfig(c *cfg) { - *c = cfg{ - log: logger.NewLoggerWrapper(zap.L()), - perm: defaultPerm, - openedCacheSize: defaultOpenedCacheSize, - openedCacheTTL: defaultOpenedCacheTTL, - openedCacheExpInterval: defaultOpenedCacheInterval, - blzShallowDepth: defaultBlzShallowDepth, - blzShallowWidth: defaultBlzShallowWidth, - reportError: func(context.Context, string, error) {}, - metrics: &noopMetrics{}, - waitBeforeDropDB: defaultWaitBeforeDropDB, - blzInitWorkerCount: defaultBlzInitWorkerCount, - blzMoveBatchSize: defaulBlzMoveBatchSize, - } -} - -func WithBlobovniczaTreeLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.log = log - } -} - -func WithBlobovniczaLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) - } -} - -func WithPermissions(perm fs.FileMode) Option { - return func(c *cfg) { - c.perm = perm - } -} - -func WithBlobovniczaShallowWidth(width uint64) Option { - return func(c *cfg) { - c.blzShallowWidth = width - } -} - -func WithBlobovniczaShallowDepth(depth uint64) Option { - return func(c *cfg) { - c.blzShallowDepth = depth - } -} - -func WithRootPath(p string) Option { - return func(c *cfg) { - c.rootPath = p - } -} - -func WithBlobovniczaSize(sz uint64) Option { - return func(c *cfg) { - c.blzOpts = append(c.blzOpts, blobovnicza.WithFullSizeLimit(sz)) - } -} - -func WithOpenedCacheSize(sz int) Option { - return func(c *cfg) { - c.openedCacheSize = sz - } -} - -func WithOpenedCacheTTL(ttl time.Duration) Option { - return func(c *cfg) { - c.openedCacheTTL = ttl - } -} - -func WithOpenedCacheExpInterval(expInterval time.Duration) Option { - return func(c *cfg) { - c.openedCacheExpInterval = expInterval - } -} - -func WithObjectSizeLimit(sz uint64) Option { - return func(c *cfg) { - c.blzOpts = append(c.blzOpts, blobovnicza.WithObjectSizeLimit(sz)) - } -} - -func WithMetrics(m Metrics) Option { - return func(c *cfg) { - c.metrics = m - } -} - -func WithWaitBeforeDropDB(t time.Duration) Option { - return func(c *cfg) { - c.waitBeforeDropDB = t - } -} - -func WithMoveBatchSize(v int) Option { - return func(c *cfg) { - c.blzMoveBatchSize = v - } -} - -// WithInitWorkerCount sets maximum workers count to init blobovnicza tree. -// -// Negative or zero value means no limit. 
-func WithInitWorkerCount(v int) Option {
-	if v <= 0 {
-		v = -1
-	}
-	return func(c *cfg) {
-		c.blzInitWorkerCount = v
-	}
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
deleted file mode 100644
index 37c49d741..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package blobovniczatree
-
-import (
-	"context"
-	"errors"
-	"path/filepath"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-	"go.uber.org/zap"
-)
-
-// Put saves the object in the maximum weight blobovnicza.
-//
-// returns an error if the object could not be saved in any blobovnicza.
-func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
-	var (
-		success   bool
-		size      int
-		startedAt = time.Now()
-	)
-	defer func() {
-		b.metrics.Put(time.Since(startedAt), size, success)
-	}()
-
-	_, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put",
-		trace.WithAttributes(
-			attribute.String("address", prm.Address.EncodeToString()),
-			attribute.Bool("dont_compress", prm.DontCompress),
-		))
-	defer span.End()
-
-	if b.readOnly {
-		return common.PutRes{}, common.ErrReadOnly
-	}
-
-	if !prm.DontCompress {
-		prm.RawData = b.compression.Compress(prm.RawData)
-	}
-	size = len(prm.RawData)
-
-	var putPrm blobovnicza.PutPrm
-	putPrm.SetAddress(prm.Address)
-	putPrm.SetMarshaledObject(prm.RawData)
-
-	it := &putIterator{
-		B:       b,
-		ID:      nil,
-		AllFull: true,
-		PutPrm:  putPrm,
-	}
-
-	if err := b.iterateDeepest(ctx, prm.Address, func(s string) (bool, error) { return it.iterate(ctx, s) }); err != nil {
-		return common.PutRes{}, err
-	} else if it.ID == nil {
-		if it.AllFull {
-			return common.PutRes{}, common.ErrNoSpace
-		}
-		return common.PutRes{}, errPutFailed
-	}
-
-	success = true
-	return common.PutRes{StorageID: it.ID.Bytes()}, nil
-}
-
-type putIterator struct {
-	B       *Blobovniczas
-	ID      *ID
-	AllFull bool
-	PutPrm  blobovnicza.PutPrm
-}
-
-func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
-	active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
-	if err != nil {
-		if !isLogical(err) {
-			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
-		} else {
-			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
-				zap.Error(err))
-		}
-
-		return false, nil
-	}
-
-	if active == nil {
-		i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
-		return false, nil
-	}
-	defer active.Close(ctx)
-
-	i.AllFull = false
-
-	_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
-	if err != nil {
-		if !isLogical(err) {
-			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
-		} else {
-			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
-				zap.String("path", active.SystemPath()),
-				zap.Error(err))
-		}
-		if errors.Is(err, blobovnicza.ErrNoSpace) {
-			i.AllFull = true
-		}
-		return false, nil
-	}
-
-	idx := u64FromHexString(filepath.Base(active.SystemPath()))
-	i.ID = NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
-
-	return true, nil
-}
diff --git
a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go deleted file mode 100644 index a840275b8..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ /dev/null @@ -1,618 +0,0 @@ -package blobovniczatree - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -const rebuildSuffix = ".rebuild" - -var ( - errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed") - errBatchFull = errors.New("batch full") -) - -func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (common.RebuildRes, error) { - if b.readOnly { - return common.RebuildRes{}, common.ErrReadOnly - } - - b.metrics.SetRebuildStatus(rebuildStatusRunning) - b.metrics.SetRebuildPercent(0) - success := true - defer func() { - if success { - b.metrics.SetRebuildStatus(rebuildStatusCompleted) - } else { - b.metrics.SetRebuildStatus(rebuildStatusFailed) - } - }() - - b.rebuildGuard.Lock() - defer b.rebuildGuard.Unlock() - - var res common.RebuildRes - - b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) - completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter) - res.ObjectsMoved += completedPreviosMoves - if err != nil { - b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) - success = false - return res, err - } - b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess) - - b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild) - dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent) - if err != nil { - b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err)) - success = false - return res, err - } - - b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate))) - res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res) - if err != nil { - success = false - } - return res, err -} - -func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) { - var completedDBCount uint32 - for _, db := range dbs { - b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) - movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter) - res.ObjectsMoved += movedObjects - if err != nil { - b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) - return res, err - } - b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects)) - res.FilesRemoved++ - completedDBCount++ - b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs))) - } - b.metrics.SetRebuildPercent(100) - return res, nil -} - -func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) { - withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx) - 
if err != nil {
-		return nil, err
-	}
-	withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent)
-	if err != nil {
-		return nil, err
-	}
-	for k := range withFillPercent {
-		withSchemaChange[k] = struct{}{}
-	}
-	result := make([]string, 0, len(withSchemaChange))
-	for db := range withSchemaChange {
-		result = append(result, db)
-	}
-	return result, nil
-}
-
-func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) {
-	dbsToMigrate := make(map[string]struct{})
-	if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
-		dbsToMigrate[s] = struct{}{}
-		return false, nil
-	}); err != nil {
-		return nil, err
-	}
-	if err := b.iterateSortedLeaves(ctx, nil, func(s string) (bool, error) {
-		delete(dbsToMigrate, s)
-		return false, nil
-	}); err != nil {
-		return nil, err
-	}
-	return dbsToMigrate, nil
-}
-
-func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) {
-	if target <= 0 || target > 100 {
-		return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target)
-	}
-	result := make(map[string]struct{})
-	if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) {
-		dir := filepath.Join(b.rootPath, lvlPath)
-		entries, err := os.ReadDir(dir)
-		if os.IsNotExist(err) { // non-initialized tree
-			return false, nil
-		}
-		if err != nil {
-			return false, err
-		}
-		hasDBs := false
-		// the db with maxIdx could be the active one, so it should not be rebuilt
-		var maxIdx uint64
-		for _, e := range entries {
-			if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
-				continue
-			}
-			hasDBs = true
-			maxIdx = max(u64FromHexString(e.Name()), maxIdx)
-		}
-		if !hasDBs {
-			return false, nil
-		}
-		for _, e := range entries {
-			if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
-				continue
-			}
-			if u64FromHexString(e.Name()) == maxIdx {
-				continue
-			}
-			path := filepath.Join(lvlPath, e.Name())
-			resettlementRequired, err := b.rebuildBySize(ctx, path, target)
-			if err != nil {
-				return false, err
-			}
-			if resettlementRequired {
-				result[path] = struct{}{}
-			}
-		}
-		return false, nil
-	}); err != nil {
-		return nil, err
-	}
-	return result, nil
-}
-
-func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
-	shDB := b.getBlobovnicza(ctx, path)
-	blz, err := shDB.Open(ctx)
-	if err != nil {
-		return false, err
-	}
-	defer shDB.Close(ctx)
-	fp := blz.FillPercent()
-	// the accepted fill percent range is defined as
-	// |----|+++++++++++++++++|+++++++++++++++++|---------------
-	// 0%   target            100%              100+(100 - target)
-	// where `+` marks accepted fill percent values and `-` marks not accepted ones
-	return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
-}
-
-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
-	shDB := b.getBlobovnicza(ctx, path)
-	blz, err := shDB.Open(ctx)
-	if err != nil {
-		return 0, err
-	}
-	shDBClosed := false
-	defer func() {
-		if shDBClosed {
-			return
-		}
-		shDB.Close(ctx)
-	}()
-	dropTempFile, err := b.addRebuildTempFile(ctx, path)
-	if err != nil {
-		return 0, err
-	}
-	migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
-	if err != nil {
-		return migratedObjects, err
-	}
-	shDBClosed, err = b.dropDB(ctx, path, shDB)
-	if err == nil {
-		// drop only on success to continue rebuild on error
-		dropTempFile()
-	}
-	return migratedObjects, err
-}
-
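-// A worked example of the acceptance window applied by rebuildBySize above,
-// written as a hypothetical standalone predicate (an editor's sketch, not
-// part of the original file):
-//
-//	// fillPercentAccepted reports whether fill percent fp is kept for
-//	// target t, i.e. whether fp lies within [t, 100+(100-t)].
-//	func fillPercentAccepted(fp, t int) bool {
-//		return fp >= t && fp <= 100+(100-t)
-//	}
-//
-// With target 80 the window is [80, 120]: a database filled to 64% is
-// resettled as too sparse, one at 130% as overflowed, while one at 95% is
-// kept.
-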
-func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { - sysPath := filepath.Join(b.rootPath, path) - sysPath += rebuildSuffix - _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) - if err != nil { - return nil, err - } - return func() { - if err := os.Remove(sysPath); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) - } - }, nil -} - -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) { - var result atomic.Uint64 - batch := make(map[oid.Address][]byte) - - var prm blobovnicza.IteratePrm - prm.DecodeAddresses() - prm.SetHandler(func(ie blobovnicza.IterationElement) error { - batch[ie.Address()] = bytes.Clone(ie.ObjectData()) - if len(batch) == b.blzMoveBatchSize { - return errBatchFull - } - return nil - }) - - for { - release, err := limiter.ReadRequest(ctx) - if err != nil { - return result.Load(), err - } - _, err = blz.Iterate(ctx, prm) - release() - if err != nil && !errors.Is(err, errBatchFull) { - return result.Load(), err - } - - if len(batch) == 0 { - break - } - - eg, egCtx := errgroup.WithContext(ctx) - - for addr, data := range batch { - release, err := limiter.AcquireWorkSlot(egCtx) - if err != nil { - _ = eg.Wait() - return result.Load(), err - } - eg.Go(func() error { - defer release() - moveRelease, err := limiter.WriteRequest(ctx) - if err != nil { - return err - } - err = b.moveObject(egCtx, blz, blzPath, addr, data, meta) - moveRelease() - if err == nil { - result.Add(1) - } - return err - }) - } - if err := eg.Wait(); err != nil { - return result.Load(), err - } - - batch = make(map[oid.Address][]byte) - } - - return result.Load(), nil -} - -func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string, - addr oid.Address, data []byte, metaStore common.MetaStorage, -) error { - startedAt := time.Now() - defer func() { - b.metrics.ObjectMoved(time.Since(startedAt)) - }() - it := &moveIterator{ - B: b, - ID: nil, - AllFull: true, - Address: addr, - ObjectData: data, - MetaStore: metaStore, - Source: source, - SourceSysPath: sourcePath, - } - - if err := b.iterateDeepest(ctx, addr, func(lvlPath string) (bool, error) { return it.tryMoveToLvl(ctx, lvlPath) }); err != nil { - return err - } else if it.ID == nil { - if it.AllFull { - return common.ErrNoSpace - } - return errPutFailed - } - return nil -} - -func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - case <-time.After(b.waitBeforeDropDB): // to complete requests with old storage ID - } - - b.dbCache.EvictAndMarkNonCached(path) - defer b.dbCache.RemoveFromNonCached(path) - - b.dbFilesGuard.Lock() - defer b.dbFilesGuard.Unlock() - - if err := shDB.CloseAndRemoveFile(ctx); err != nil { - return false, err - } - b.commondbManager.CleanResources(path) - if err := b.dropDirectoryIfEmpty(filepath.Dir(path)); err != nil { - return true, err - } - return true, nil -} - -func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error { - if path == "." 
{ - return nil - } - - sysPath := filepath.Join(b.rootPath, path) - entries, err := os.ReadDir(sysPath) - if err != nil { - return err - } - if len(entries) > 0 { - return nil - } - if err := os.Remove(sysPath); err != nil { - return err - } - return b.dropDirectoryIfEmpty(filepath.Dir(path)) -} - -func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) { - var count uint64 - var rebuildTempFilesToRemove []string - err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) { - rebuildTmpFilePath := s - s = strings.TrimSuffix(s, rebuildSuffix) - shDB := b.getBlobovnicza(ctx, s) - blz, err := shDB.Open(ctx) - if err != nil { - return true, err - } - defer shDB.Close(ctx) - - release, err := rateLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } - incompletedMoves, err := blz.ListMoveInfo(ctx) - release() - if err != nil { - return true, err - } - - for _, move := range incompletedMoves { - release, err := rateLimiter.WriteRequest(ctx) - if err != nil { - return false, err - } - err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore) - release() - if err != nil { - return true, err - } - count++ - } - - rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath) - return false, nil - }) - for _, tmp := range rebuildTempFilesToRemove { - release, err := rateLimiter.WriteRequest(ctx) - if err != nil { - return count, err - } - if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) - } - release() - } - return count, err -} - -func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string, - move blobovnicza.MoveInfo, metaStore common.MetaStorage, -) error { - targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path()) - target, err := targetDB.Open(ctx) - if err != nil { - return err - } - defer targetDB.Close(ctx) - - existsInSource := true - var gPrm blobovnicza.GetPrm - gPrm.SetAddress(move.Address) - gRes, err := source.Get(ctx, gPrm) - if err != nil { - if client.IsErrObjectNotFound(err) { - existsInSource = false - } else { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) - return err - } - } - - if !existsInSource { // object was deleted by Rebuild, need to delete move info - if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) - return err - } - b.deleteProtectedObjects.Delete(move.Address) - return nil - } - - existsInTarget, err := target.Exists(ctx, move.Address) - if err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) - return err - } - - if !existsInTarget { - var putPrm blobovnicza.PutPrm - putPrm.SetAddress(move.Address) - putPrm.SetMarshaledObject(gRes.Object()) - _, err = target.Put(ctx, putPrm) - if err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err)) - return err - } - } - - if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address)) - return err - } - - var deletePrm blobovnicza.DeletePrm - deletePrm.SetAddress(move.Address) - if _, err = 
source.Delete(ctx, deletePrm); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err)) - return err - } - - if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) - return err - } - - b.deleteProtectedObjects.Delete(move.Address) - return nil -} - -type moveIterator struct { - B *Blobovniczas - ID *ID - AllFull bool - Address oid.Address - ObjectData []byte - MetaStore common.MetaStorage - Source *blobovnicza.Blobovnicza - SourceSysPath string -} - -func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) { - target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) - if err != nil { - if !isLogical(err) { - i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) - } else { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) - } - return false, nil - } - - if target == nil { - i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) - return false, nil - } - defer target.Close(ctx) - - i.AllFull = false - - targetIDx := u64FromHexString(filepath.Base(target.SystemPath())) - targetStorageID := NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(targetIDx)))) - - if err = i.Source.PutMoveInfo(ctx, blobovnicza.MoveInfo{ - Address: i.Address, - TargetStorageID: targetStorageID.Bytes(), - }); err != nil { - if !isLogical(err) { - i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) - } else { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) - } - return true, nil - } - i.B.deleteProtectedObjects.Add(i.Address) - - var putPrm blobovnicza.PutPrm - putPrm.SetAddress(i.Address) - putPrm.SetMarshaledObject(i.ObjectData) - putPrm.SetForce(true) - - _, err = target.Blobovnicza().Put(ctx, putPrm) - if err != nil { - if !isLogical(err) { - i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) - } else { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) - } - return true, nil - } - - if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address)) - return true, nil - } - - var deletePrm blobovnicza.DeletePrm - deletePrm.SetAddress(i.Address) - if _, err = i.Source.Delete(ctx, deletePrm); err != nil { - if !isLogical(err) { - i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err) - } else { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) - } - return true, nil - } - - if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil { - if !isLogical(err) { - i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err) - } else { - i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) - } - return true, nil - } - i.B.deleteProtectedObjects.Delete(i.Address) - - i.ID = targetStorageID - return true, nil -} - -type addressMap struct { - data map[oid.Address]struct{} - guard *sync.RWMutex -} - -func newAddressMap() *addressMap { - return &addressMap{ - data: 
make(map[oid.Address]struct{}), - guard: &sync.RWMutex{}, - } -} - -func (m *addressMap) Add(address oid.Address) { - m.guard.Lock() - defer m.guard.Unlock() - - m.data[address] = struct{}{} -} - -func (m *addressMap) Delete(address oid.Address) { - m.guard.Lock() - defer m.guard.Unlock() - - delete(m.data, address) -} - -func (m *addressMap) Contains(address oid.Address) bool { - m.guard.RLock() - defer m.guard.RUnlock() - - _, contains := m.data[address] - return contains -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go deleted file mode 100644 index 4146ef260..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package blobovniczatree - -import ( - "bytes" - "context" - "os" - "path/filepath" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestRebuildFailover(t *testing.T) { - t.Parallel() - - t.Run("only move info saved", testRebuildFailoverOnlyMoveInfoSaved) - - t.Run("object saved to target", testRebuildFailoverObjectSavedToTarget) - - t.Run("object deleted from source", testRebuildFailoverObjectDeletedFromSource) -} - -func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { - t.Parallel() - dir := t.TempDir() - - blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - obj := blobstortest.NewObject(1024) - data, err := obj.Marshal() - require.NoError(t, err) - - var pPrm blobovnicza.PutPrm - pPrm.SetAddress(object.AddressOf(obj)) - pPrm.SetMarshaledObject(data) - _, err = blz.Put(context.Background(), pPrm) - require.NoError(t, err) - - require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{ - Address: object.AddressOf(obj), - TargetStorageID: []byte("0/0/0"), - })) - - require.NoError(t, blz.Close(context.Background())) - _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) - require.NoError(t, err) - - testRebuildFailoverValidate(t, dir, obj, true) -} - -func testRebuildFailoverObjectSavedToTarget(t *testing.T) { - t.Parallel() - dir := t.TempDir() - - blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - obj := blobstortest.NewObject(1024) - data, err := obj.Marshal() - require.NoError(t, err) - - var pPrm blobovnicza.PutPrm - pPrm.SetAddress(object.AddressOf(obj)) - pPrm.SetMarshaledObject(data) - _, err = blz.Put(context.Background(), pPrm) - require.NoError(t, err) - - require.NoError(t, blz.PutMoveInfo(context.Background(), 
blobovnicza.MoveInfo{ - Address: object.AddressOf(obj), - TargetStorageID: []byte("0/0/0"), - })) - - require.NoError(t, blz.Close(context.Background())) - - _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) - require.NoError(t, err) - - blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - _, err = blz.Put(context.Background(), pPrm) - require.NoError(t, err) - - require.NoError(t, blz.Close(context.Background())) - - testRebuildFailoverValidate(t, dir, obj, true) -} - -func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { - t.Parallel() - dir := t.TempDir() - - blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - obj := blobstortest.NewObject(1024) - data, err := obj.Marshal() - require.NoError(t, err) - - require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{ - Address: object.AddressOf(obj), - TargetStorageID: []byte("0/0/0"), - })) - - require.NoError(t, blz.Close(context.Background())) - - _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) - require.NoError(t, err) - - blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - var pPrm blobovnicza.PutPrm - pPrm.SetAddress(object.AddressOf(obj)) - pPrm.SetMarshaledObject(data) - _, err = blz.Put(context.Background(), pPrm) - require.NoError(t, err) - - require.NoError(t, blz.Close(context.Background())) - - testRebuildFailoverValidate(t, dir, obj, false) -} - -func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) { - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(2048), - WithBlobovniczaShallowWidth(2), - WithBlobovniczaShallowDepth(2), - WithRootPath(dir), - WithBlobovniczaSize(10*1024), - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - var dPrm common.DeletePrm - dPrm.Address = object.AddressOf(obj) - dPrm.StorageID = []byte("0/0/1") - _, err := b.Delete(context.Background(), dPrm) - require.ErrorIs(t, err, errObjectIsDeleteProtected) - - metaStub := &storageIDUpdateStub{ - storageIDs: make(map[oid.Address][]byte), - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 1, - }) - require.NoError(t, err) - require.Equal(t, uint64(1), rRes.ObjectsMoved) - require.Equal(t, uint64(0), rRes.FilesRemoved) - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) - - blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - moveInfo, err := blz.ListMoveInfo(context.Background()) - require.NoError(t, err) - require.Equal(t, 0, len(moveInfo)) - - var gPrm blobovnicza.GetPrm - 
gPrm.SetAddress(object.AddressOf(obj)) - _, err = blz.Get(context.Background(), gPrm) - require.True(t, client.IsErrObjectNotFound(err)) - - require.NoError(t, blz.Close(context.Background())) - - blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init(context.Background())) - - moveInfo, err = blz.ListMoveInfo(context.Background()) - require.NoError(t, err) - require.Equal(t, 0, len(moveInfo)) - - gRes, err := blz.Get(context.Background(), gPrm) - require.NoError(t, err) - require.True(t, len(gRes.Object()) > 0) - - if mustUpdateStorageID { - require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)])) - } - - require.NoError(t, blz.Close(context.Background())) - - _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild")) - require.True(t, os.IsNotExist(err)) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go deleted file mode 100644 index a7a99fec3..000000000 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ /dev/null @@ -1,520 +0,0 @@ -package blobovniczatree - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestBlobovniczaTreeSchemaRebuild(t *testing.T) { - t.Parallel() - - t.Run("width increased", func(t *testing.T) { - t.Parallel() - testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 3, false) - }) - - t.Run("width reduced", func(t *testing.T) { - t.Parallel() - testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 1, true) - }) - - t.Run("depth increased", func(t *testing.T) { - t.Parallel() - testBlobovniczaTreeRebuildHelper(t, 1, 2, 2, 2, true) - }) - - t.Run("depth reduced", func(t *testing.T) { - t.Parallel() - testBlobovniczaTreeRebuildHelper(t, 2, 2, 1, 2, true) - }) -} - -func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { - t.Parallel() - - t.Run("no rebuild by fill percent", func(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), - WithBlobovniczaShallowWidth(1), // single directory - WithBlobovniczaShallowDepth(1), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - storageIDs := make(map[oid.Address][]byte) - for range 100 { - obj := blobstortest.NewObject(64 * 1024) // 64KB object - data, err := obj.Marshal() - require.NoError(t, err) - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(context.Background(), prm) - require.NoError(t, err) - storageIDs[prm.Address] = res.StorageID - } - metaStub := &storageIDUpdateStub{ - 
storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 60, - }) - require.NoError(t, err) - dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 - require.False(t, dataMigrated) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) - }) - - t.Run("no rebuild single db", func(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), - WithBlobovniczaShallowWidth(1), // single directory - WithBlobovniczaShallowDepth(1), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - storageIDs := make(map[oid.Address][]byte) - obj := blobstortest.NewObject(64 * 1024) // 64KB object - data, err := obj.Marshal() - require.NoError(t, err) - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(context.Background(), prm) - require.NoError(t, err) - storageIDs[prm.Address] = res.StorageID - metaStub := &storageIDUpdateStub{ - storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 90, // 64KB / 100KB = 64% - }) - require.NoError(t, err) - dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 - require.False(t, dataMigrated) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) - }) - - t.Run("rebuild by fill percent", func(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), - WithBlobovniczaShallowWidth(1), // single directory - WithBlobovniczaShallowDepth(1), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - storageIDs := make(map[oid.Address][]byte) - toDelete := make(map[oid.Address][]byte) - for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created - obj := blobstortest.NewObject(64 * 1024) - data, err := obj.Marshal() - require.NoError(t, err) - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(context.Background(), prm) - require.NoError(t, err) - storageIDs[prm.Address] = res.StorageID - if i%2 == 1 { - toDelete[prm.Address] = res.StorageID - } - } - 
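// A quick sizing check for this scenario: a 64 KB object occupies 64% of the
// 100 KB cap, and the cap is evidently enforced between puts, so each
// blobovnicza accepts two objects before the tree opens the next one -- 100
// puts yield about 50 DBs. Dropping every second object (the toDelete half)
// leaves each DB at ~64% fill, under the FillPercent of 80 passed to Rebuild,
// which is what makes the DBs migration candidates; the assertions below
// expect 49 of them to be drained into a surviving DB and removed.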
for addr, storageID := range toDelete { - var prm common.DeletePrm - prm.Address = addr - prm.StorageID = storageID - _, err := b.Delete(context.Background(), prm) - require.NoError(t, err) - } - metaStub := &storageIDUpdateStub{ - storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 80, - }) - require.NoError(t, err) - require.Equal(t, uint64(49), rRes.FilesRemoved) - require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 objects - require.Equal(t, uint64(49), metaStub.updatedCount) - - for addr, storageID := range storageIDs { - if _, found := toDelete[addr]; found { - continue - } - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) - }) - - t.Run("rebuild by overflow", func(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), - WithBlobovniczaShallowWidth(1), // single directory - WithBlobovniczaShallowDepth(1), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - storageIDs := make(map[oid.Address][]byte) - for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created - obj := blobstortest.NewObject(64 * 1024) - data, err := obj.Marshal() - require.NoError(t, err) - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(context.Background(), prm) - require.NoError(t, err) - storageIDs[prm.Address] = res.StorageID - } - metaStub := &storageIDUpdateStub{ - storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - require.NoError(t, b.Close(context.Background())) - b = NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), - WithBlobovniczaShallowWidth(1), - WithBlobovniczaShallowDepth(1), - WithRootPath(dir), - WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - limiter := &rebuildLimiterStub{} - rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 80, - }) - require.NoError(t, err) - require.Equal(t, uint64(49), rRes.FilesRemoved) - require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects - require.Equal(t, uint64(98), metaStub.updatedCount) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) - }) -} - -func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - 
WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(64*1024), // 64KB object size limit - WithBlobovniczaShallowWidth(5), - WithBlobovniczaShallowDepth(2), // depth = 2 - WithRootPath(dir), - WithBlobovniczaSize(100*1024), - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - obj := blobstortest.NewObject(64 * 1024) // 64KB object - data, err := obj.Marshal() - require.NoError(t, err) - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(context.Background(), prm) - require.NoError(t, err) - - storageIDs := make(map[oid.Address][]byte) - storageIDs[prm.Address] = res.StorageID - - require.NoError(t, b.Close(context.Background())) - - b = NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(32*1024), // 32KB object size limit - WithBlobovniczaShallowWidth(5), - WithBlobovniczaShallowDepth(3), // depth = 3 - WithRootPath(dir), - WithBlobovniczaSize(100*1024), - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - metaStub := &storageIDUpdateStub{ - storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - var rPrm common.RebuildPrm - rPrm.MetaStorage = metaStub - rPrm.Limiter = limiter - rPrm.FillPercent = 1 - rRes, err := b.Rebuild(context.Background(), rPrm) - require.NoError(t, err) - dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 - require.True(t, dataMigrated) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) -} - -func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { - dir := t.TempDir() - b := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(2048), - WithBlobovniczaShallowWidth(sourceWidth), - WithBlobovniczaShallowDepth(sourceDepth), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(3)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - eg, egCtx := errgroup.WithContext(context.Background()) - storageIDs := make(map[oid.Address][]byte) - storageIDsGuard := &sync.Mutex{} - for range 100 { - eg.Go(func() error { - obj := blobstortest.NewObject(1024) - data, err := obj.Marshal() - if err != nil { - return err - } - var prm common.PutPrm - prm.Address = object.AddressOf(obj) - prm.RawData = data - res, err := b.Put(egCtx, prm) - if err != nil { - return err - } - storageIDsGuard.Lock() - storageIDs[prm.Address] = res.StorageID - storageIDsGuard.Unlock() - return nil - }) - } - - require.NoError(t, eg.Wait()) - require.NoError(t, b.Close(context.Background())) - - b = NewBlobovniczaTree( - context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), - WithObjectSizeLimit(2048), - 
WithBlobovniczaShallowWidth(targetWidth), - WithBlobovniczaShallowDepth(targetDepth), - WithRootPath(dir), - WithBlobovniczaSize(100*1024), - WithWaitBeforeDropDB(0), - WithOpenedCacheSize(1000), - WithMoveBatchSize(50)) - require.NoError(t, b.Open(mode.ComponentReadWrite)) - require.NoError(t, b.Init()) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - metaStub := &storageIDUpdateStub{ - storageIDs: storageIDs, - guard: &sync.Mutex{}, - } - limiter := &rebuildLimiterStub{} - var rPrm common.RebuildPrm - rPrm.MetaStorage = metaStub - rPrm.Limiter = limiter - rPrm.FillPercent = 1 - rRes, err := b.Rebuild(context.Background(), rPrm) - require.NoError(t, err) - dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 - require.Equal(t, shouldMigrate, dataMigrated) - - for addr, storageID := range storageIDs { - var gPrm common.GetPrm - gPrm.Address = addr - gPrm.StorageID = storageID - _, err := b.Get(context.Background(), gPrm) - require.NoError(t, err) - } - - require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) -} - -type storageIDUpdateStub struct { - guard *sync.Mutex - storageIDs map[oid.Address][]byte - updatedCount uint64 -} - -func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error { - s.guard.Lock() - defer s.guard.Unlock() - - s.storageIDs[addr] = storageID - s.updatedCount++ - return nil -} - -type rebuildLimiterStub struct { - slots atomic.Int64 - readRequests atomic.Int64 - writeRequests atomic.Int64 -} - -func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { - s.slots.Add(1) - return func() { s.slots.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) { - s.readRequests.Add(1) - return func() { s.readRequests.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) { - s.writeRequests.Add(1) - return func() { s.writeRequests.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) ValidateReleased() error { - if v := s.slots.Load(); v != 0 { - return fmt.Errorf("invalid slots value %d", v) - } - if v := s.readRequests.Load(); v != 0 { - return fmt.Errorf("invalid read requests value %d", v) - } - if v := s.writeRequests.Load(); v != 0 { - return fmt.Errorf("invalid write requests value %d", v) - } - return nil -} diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go deleted file mode 100644 index ceaf2538a..000000000 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ /dev/null @@ -1,120 +0,0 @@ -package blobstor - -import ( - "context" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -// SubStorage represents single storage component with some storage policy. -type SubStorage struct { - Storage common.Storage - Policy func(*objectSDK.Object, []byte) bool -} - -// BlobStor represents FrostFS local BLOB storage. 
-type BlobStor struct { - cfg - - modeMtx sync.RWMutex - mode mode.Mode -} - -// Info contains information about blobstor. -type Info struct { - SubStorages []SubStorageInfo -} - -// SubStorageInfo contains information about blobstor storage component. -type SubStorageInfo struct { - Type string - Path string -} - -// Option represents BlobStor's constructor option. -type Option func(*cfg) - -type cfg struct { - compression compression.Compressor - log *logger.Logger - storage []SubStorage - metrics Metrics -} - -func initConfig(c *cfg) { - c.log = logger.NewLoggerWrapper(zap.L()) - c.metrics = &noopMetrics{} -} - -// New creates, initializes and returns new BlobStor instance. -func New(opts ...Option) *BlobStor { - bs := new(BlobStor) - bs.mode = mode.Disabled - initConfig(&bs.cfg) - - for i := range opts { - opts[i](&bs.cfg) - } - - for i := range bs.storage { - bs.storage[i].Storage.SetCompressor(&bs.compression) - } - - return bs -} - -// SetLogger sets logger. It is used after the shard ID was generated to use it in logs. -func (b *BlobStor) SetLogger(l *logger.Logger) { - b.log = l -} - -func (b *BlobStor) SetParentID(parentID string) { - b.metrics.SetParentID(parentID) - for _, ss := range b.storage { - ss.Storage.SetParentID(parentID) - } -} - -// WithStorages provides sub-blobstors. -func WithStorages(st []SubStorage) Option { - return func(c *cfg) { - c.storage = st - } -} - -// WithLogger returns option to specify BlobStor's logger. -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} - -func WithCompression(comp compression.Config) Option { - return func(c *cfg) { - c.compression.Config = comp - } -} - -// SetReportErrorFunc allows to provide a function to be called on disk errors. -// This function MUST be called before Open. 
-func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) { - for i := range b.storage { - b.storage[i].Storage.SetReportErrorFunc(f) - } -} - -func WithMetrics(m Metrics) Option { - return func(c *cfg) { - c.metrics = m - } -} - -func (b *BlobStor) Compressor() *compression.Compressor { - return &b.compression -} diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go deleted file mode 100644 index 6ddeb6f00..000000000 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ /dev/null @@ -1,338 +0,0 @@ -package blobstor - -import ( - "context" - "path/filepath" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func defaultTestStorages(p string, smallSizeLimit uint64) ([]SubStorage, *teststore.TestStore, *teststore.TestStore) { - smallFileStorage := teststore.New(teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(filepath.Join(p, "blobovniczas")), - blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init - )) - largeFileStorage := teststore.New(teststore.WithSubstorage(fstree.New(fstree.WithPath(p)))) - return []SubStorage{ - { - Storage: smallFileStorage, - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) <= smallSizeLimit - }, - }, - { - Storage: largeFileStorage, - }, - }, smallFileStorage, largeFileStorage -} - -func defaultStorages(p string, smallSizeLimit uint64) []SubStorage { - storages, _, _ := defaultTestStorages(p, smallSizeLimit) - return storages -} - -func TestCompression(t *testing.T) { - dir := t.TempDir() - - const ( - smallSizeLimit = 512 - objCount = 4 - ) - - newBlobStor := func(t *testing.T, compress bool) *BlobStor { - bs := New( - WithCompression(compression.Config{ - Enabled: compress, - }), - WithStorages(defaultStorages(dir, smallSizeLimit))) - require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init(context.Background())) - return bs - } - - bigObj := make([]*objectSDK.Object, objCount) - smallObj := make([]*objectSDK.Object, objCount) - for i := range objCount { - bigObj[i] = testObject(smallSizeLimit * 2) - smallObj[i] = testObject(smallSizeLimit / 2) - } - - testGet := func(t *testing.T, b *BlobStor, i int) { - res1, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(smallObj[i])}) - require.NoError(t, err) - require.Equal(t, smallObj[i], res1.Object) - - res2, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(bigObj[i])}) - require.NoError(t, err) - require.Equal(t, bigObj[i], res2.Object) - } - - testPut := func(t *testing.T, b *BlobStor, i int) { - var prm common.PutPrm - prm.Object = smallObj[i] - _, err := b.Put(context.Background(), prm) - require.NoError(t, err) - - prm = common.PutPrm{} 
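// Put takes common.PutPrm by value (see the Storage interface further down),
// so re-zeroing the shared variable is not required for correctness; it just
// makes explicit that only Object is set for the big-object put as well.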
- prm.Object = bigObj[i] - _, err = b.Put(context.Background(), prm) - require.NoError(t, err) - } - - // Put and Get uncompressed object - blobStor := newBlobStor(t, false) - testPut(t, blobStor, 0) - testGet(t, blobStor, 0) - require.NoError(t, blobStor.Close(context.Background())) - - blobStor = newBlobStor(t, true) - testGet(t, blobStor, 0) // get uncompressed object with compress enabled - testPut(t, blobStor, 1) - testGet(t, blobStor, 1) - require.NoError(t, blobStor.Close(context.Background())) - - blobStor = newBlobStor(t, false) - testGet(t, blobStor, 0) // get old uncompressed object - testGet(t, blobStor, 1) // get compressed object with compression disabled - testPut(t, blobStor, 2) - testGet(t, blobStor, 2) - require.NoError(t, blobStor.Close(context.Background())) -} - -func TestBlobstor_needsCompression(t *testing.T) { - const smallSizeLimit = 512 - newBlobStor := func(t *testing.T, compress bool, ct ...string) *BlobStor { - dir := t.TempDir() - - bs := New( - WithCompression(compression.Config{ - Enabled: compress, - UncompressableContentTypes: ct, - }), - WithStorages([]SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(filepath.Join(dir, "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < smallSizeLimit - }, - }, - { - Storage: fstree.New(fstree.WithPath(dir)), - }, - })) - require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init(context.Background())) - return bs - } - - newObjectWithCt := func(contentType string) *objectSDK.Object { - obj := testObject(smallSizeLimit + 1) - if contentType != "" { - var a objectSDK.Attribute - a.SetKey(objectSDK.AttributeContentType) - a.SetValue(contentType) - obj.SetAttributes(a) - } - return obj - } - - t.Run("content-types specified", func(t *testing.T) { - b := newBlobStor(t, true, "audio/*", "*/x-mpeg", "*/mpeg", "application/x-midi") - - obj := newObjectWithCt("video/mpeg") - require.False(t, b.compression.NeedsCompression(obj)) - - obj = newObjectWithCt("audio/aiff") - require.False(t, b.compression.NeedsCompression(obj)) - - obj = newObjectWithCt("application/x-midi") - require.False(t, b.compression.NeedsCompression(obj)) - - obj = newObjectWithCt("text/plain") - require.True(t, b.compression.NeedsCompression(obj)) - - obj = newObjectWithCt("") - require.True(t, b.compression.NeedsCompression(obj)) - }) - t.Run("content-types omitted", func(t *testing.T) { - b := newBlobStor(t, true) - obj := newObjectWithCt("video/mpeg") - require.True(t, b.compression.NeedsCompression(obj)) - }) - t.Run("compress disabled", func(t *testing.T) { - b := newBlobStor(t, false, "video/mpeg") - - obj := newObjectWithCt("video/mpeg") - require.False(t, b.compression.NeedsCompression(obj)) - - obj = newObjectWithCt("text/plain") - require.False(t, b.compression.NeedsCompression(obj)) - }) -} - -func TestConcurrentPut(t *testing.T) { - dir := t.TempDir() - - const ( - smallSizeLimit = 512 - - // concurrentPutCount is fstree implementation specific - concurrentPutCount = 5 - ) - - blobStor := New( - WithStorages(defaultStorages(dir, smallSizeLimit))) - require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init(context.Background())) - - testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - res, err := b.Get(context.Background(), common.GetPrm{Address: 
object.AddressOf(obj)}) - require.NoError(t, err) - require.Equal(t, obj, res.Object) - } - - testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - var prm common.PutPrm - prm.Object = obj - _, err := b.Put(context.Background(), prm) - require.NoError(t, err) - } - - testPutFileExistsError := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - var prm common.PutPrm - prm.Object = obj - if _, err := b.Put(context.Background(), prm); err != nil { - require.ErrorContains(t, err, "file exists") - } - } - - t.Run("put the same big object", func(t *testing.T) { - bigObj := testObject(smallSizeLimit * 2) - - var wg sync.WaitGroup - for range concurrentPutCount { - wg.Add(1) - go func() { - testPut(t, blobStor, bigObj) - wg.Done() - }() - } - wg.Wait() - - testGet(t, blobStor, bigObj) - }) - - t.Run("put the same big object with error", func(t *testing.T) { - bigObj := testObject(smallSizeLimit * 2) - - var wg sync.WaitGroup - for range concurrentPutCount + 1 { - wg.Add(1) - go func() { - testPutFileExistsError(t, blobStor, bigObj) - wg.Done() - }() - } - wg.Wait() - - testGet(t, blobStor, bigObj) - }) - - t.Run("put the same small object", func(t *testing.T) { - smallObj := testObject(smallSizeLimit / 2) - - var wg sync.WaitGroup - for range concurrentPutCount { - wg.Add(1) - go func() { - testPut(t, blobStor, smallObj) - wg.Done() - }() - } - wg.Wait() - - testGet(t, blobStor, smallObj) - }) -} - -func TestConcurrentDelete(t *testing.T) { - dir := t.TempDir() - - const smallSizeLimit = 512 - - blobStor := New( - WithStorages(defaultStorages(dir, smallSizeLimit))) - require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init(context.Background())) - - testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - var prm common.PutPrm - prm.Object = obj - _, err := b.Put(context.Background(), prm) - require.NoError(t, err) - } - - testDelete := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - var prm common.DeletePrm - prm.Address = object.AddressOf(obj) - if _, err := b.Delete(context.Background(), prm); err != nil { - require.ErrorContains(t, err, "object not found") - } - } - - testDeletedExists := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { - var prm common.ExistsPrm - prm.Address = object.AddressOf(obj) - res, err := b.Exists(context.Background(), prm) - require.NoError(t, err) - require.False(t, res.Exists) - } - - t.Run("delete the same big object", func(t *testing.T) { - bigObj := testObject(smallSizeLimit * 2) - testPut(t, blobStor, bigObj) - - var wg sync.WaitGroup - for range 2 { - wg.Add(1) - go func() { - testDelete(t, blobStor, bigObj) - wg.Done() - }() - } - wg.Wait() - - testDeletedExists(t, blobStor, bigObj) - }) - - t.Run("delete the same small object", func(t *testing.T) { - smallObj := testObject(smallSizeLimit / 2) - testPut(t, blobStor, smallObj) - - var wg sync.WaitGroup - for range 2 { - wg.Add(1) - go func() { - testDelete(t, blobStor, smallObj) - wg.Done() - }() - } - wg.Wait() - - testDeletedExists(t, blobStor, smallObj) - }) -} diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go deleted file mode 100644 index c19e099cb..000000000 --- a/pkg/local_object_storage/blobstor/common/delete.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// DeletePrm groups the parameters of Delete operation. 
-type DeletePrm struct { - Address oid.Address - StorageID []byte - Size uint64 -} - -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} diff --git a/pkg/local_object_storage/blobstor/common/errors.go b/pkg/local_object_storage/blobstor/common/errors.go deleted file mode 100644 index 46a84c736..000000000 --- a/pkg/local_object_storage/blobstor/common/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package common - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" -) - -// ErrReadOnly MUST be returned for modifying operations when the storage was opened -// in readonly mode. -var ErrReadOnly = logicerr.New("opened as read-only") - -// ErrNoSpace MUST be returned when there is no space to put an object on the device. -var ErrNoSpace = logicerr.New("no free space") diff --git a/pkg/local_object_storage/blobstor/common/exists.go b/pkg/local_object_storage/blobstor/common/exists.go deleted file mode 100644 index d9bdbd33a..000000000 --- a/pkg/local_object_storage/blobstor/common/exists.go +++ /dev/null @@ -1,14 +0,0 @@ -package common - -import oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - -// ExistsPrm groups the parameters of Exists operation. -type ExistsPrm struct { - Address oid.Address - StorageID []byte -} - -// ExistsRes groups the resulting values of Exists operation. -type ExistsRes struct { - Exists bool -} diff --git a/pkg/local_object_storage/blobstor/common/get.go b/pkg/local_object_storage/blobstor/common/get.go deleted file mode 100644 index 0d036219f..000000000 --- a/pkg/local_object_storage/blobstor/common/get.go +++ /dev/null @@ -1,17 +0,0 @@ -package common - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type GetPrm struct { - Address oid.Address - StorageID []byte - Raw bool -} - -type GetRes struct { - Object *objectSDK.Object - RawData []byte -} diff --git a/pkg/local_object_storage/blobstor/common/get_range.go b/pkg/local_object_storage/blobstor/common/get_range.go deleted file mode 100644 index 1c4f95c02..000000000 --- a/pkg/local_object_storage/blobstor/common/get_range.go +++ /dev/null @@ -1,16 +0,0 @@ -package common - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type GetRangePrm struct { - Address oid.Address - Range objectSDK.Range - StorageID []byte -} - -type GetRangeRes struct { - Data []byte -} diff --git a/pkg/local_object_storage/blobstor/common/iterate.go b/pkg/local_object_storage/blobstor/common/iterate.go deleted file mode 100644 index a1b8ff047..000000000 --- a/pkg/local_object_storage/blobstor/common/iterate.go +++ /dev/null @@ -1,22 +0,0 @@ -package common - -import oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - -// IterationElement represents a unit of elements through which Iterate operation passes. -type IterationElement struct { - ObjectData []byte - Address oid.Address - StorageID []byte -} - -// IterationHandler is a generic processor of IterationElement. -type IterationHandler func(IterationElement) error - -// IteratePrm groups the parameters of Iterate operation. -type IteratePrm struct { - Handler IterationHandler - IgnoreErrors bool -} - -// IterateRes groups the resulting values of Iterate operation. 
-type IterateRes struct{} diff --git a/pkg/local_object_storage/blobstor/common/put.go b/pkg/local_object_storage/blobstor/common/put.go deleted file mode 100644 index 51bb9624e..000000000 --- a/pkg/local_object_storage/blobstor/common/put.go +++ /dev/null @@ -1,19 +0,0 @@ -package common - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - Address oid.Address - Object *objectSDK.Object - RawData []byte - DontCompress bool -} - -// PutRes groups the resulting values of Put operation. -type PutRes struct { - StorageID []byte -} diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go deleted file mode 100644 index 788fe66f2..000000000 --- a/pkg/local_object_storage/blobstor/common/rebuild.go +++ /dev/null @@ -1,38 +0,0 @@ -package common - -import ( - "context" - - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type RebuildRes struct { - ObjectsMoved uint64 - FilesRemoved uint64 -} - -type RebuildPrm struct { - MetaStorage MetaStorage - Limiter RebuildLimiter - FillPercent int -} - -type MetaStorage interface { - UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error -} - -type ReleaseFunc func() - -type ConcurrencyLimiter interface { - AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) -} - -type RateLimiter interface { - ReadRequest(context.Context) (ReleaseFunc, error) - WriteRequest(context.Context) (ReleaseFunc, error) -} - -type RebuildLimiter interface { - ConcurrencyLimiter - RateLimiter -} diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go deleted file mode 100644 index e35c35e60..000000000 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ /dev/null @@ -1,36 +0,0 @@ -package common - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -// Storage represents key-value object storage. -// It is used as a building block for a blobstor of a shard. -type Storage interface { - Open(mode mode.ComponentMode) error - Init() error - Close(context.Context) error - - Type() string - Path() string - ObjectsCount(ctx context.Context) (uint64, error) - - SetCompressor(cc *compression.Compressor) - Compressor() *compression.Compressor - - // SetReportErrorFunc allows to provide a function to be called on disk errors. - // This function MUST be called before Open. 
- SetReportErrorFunc(f func(context.Context, string, error)) - SetParentID(parentID string) - - Get(context.Context, GetPrm) (GetRes, error) - GetRange(context.Context, GetRangePrm) (GetRangeRes, error) - Exists(context.Context, ExistsPrm) (ExistsRes, error) - Put(context.Context, PutPrm) (PutRes, error) - Delete(context.Context, DeletePrm) (DeleteRes, error) - Iterate(context.Context, IteratePrm) (IterateRes, error) - Rebuild(context.Context, RebuildPrm) (RebuildRes, error) -} diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go deleted file mode 100644 index 445a0494b..000000000 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package compression - -import ( - "crypto/rand" - "fmt" - "log" - "testing" - - "github.com/klauspost/compress" - "github.com/stretchr/testify/require" -) - -func BenchmarkCompression(b *testing.B) { - c := Compressor{Config: Config{Enabled: true}} - require.NoError(b, c.Init()) - - for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} { - b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { - b.Run("zeroed slice", func(b *testing.B) { - data := make([]byte, size) - benchWith(b, c, data) - }) - b.Run("not so random slice (block = 123)", func(b *testing.B) { - data := notSoRandomSlice(size, 123) - benchWith(b, c, data) - }) - b.Run("random slice", func(b *testing.B) { - data := make([]byte, size) - rand.Read(data) - benchWith(b, c, data) - }) - }) - } -} - -func benchWith(b *testing.B, c Compressor, data []byte) { - b.ResetTimer() - b.ReportAllocs() - for range b.N { - _ = c.Compress(data) - } -} - -func notSoRandomSlice(size, blockSize int) []byte { - data := make([]byte, size) - rand.Read(data[:blockSize]) - for i := blockSize; i < size; i += blockSize { - copy(data[i:], data[:blockSize]) - } - return data -} - -func BenchmarkCompressionRealVSEstimate(b *testing.B) { - var total float64 // to prevent from compiler optimizations - maxSize := 60 * 1024 * 1024 - b.Run("estimate", func(b *testing.B) { - b.ResetTimer() - - c := &Compressor{ - Config: Config{ - Enabled: true, - }, - } - require.NoError(b, c.Init()) - - for size := 1024; size <= maxSize; size *= 2 { - data := make([]byte, size) - _, err := rand.Reader.Read(data) - require.NoError(b, err) - - b.StartTimer() - estimation := compress.Estimate(data) - total += estimation - b.StopTimer() - } - }) - - b.Run("compress", func(b *testing.B) { - b.ResetTimer() - - c := &Compressor{ - Config: Config{ - Enabled: true, - }, - } - require.NoError(b, c.Init()) - - for size := 1024; size <= maxSize; size *= 2 { - data := make([]byte, size) - _, err := rand.Reader.Read(data) - require.NoError(b, err) - - b.StartTimer() - maxSize := c.encoder.MaxEncodedSize(len(data)) - compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) - total += float64(len(compressed)) / float64(len(data)) - b.StopTimer() - } - }) - - log.Println(total) -} diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go deleted file mode 100644 index c76cec9a1..000000000 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ /dev/null @@ -1,154 +0,0 @@ -package compression - -import ( - "bytes" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/klauspost/compress" - "github.com/klauspost/compress/zstd" -) - 
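To make the round-trip below concrete, here is a minimal, self-contained sketch of how this compressor is driven (the import path matches the deleted file; the main function and sample payload are illustrative assumptions, not part of the codebase):

package main

import (
	"bytes"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
)

func main() {
	// Enabled: true selects the zstd encoder; an empty Level means LevelDefault.
	c := &compression.Compressor{Config: compression.Config{Enabled: true}}
	if err := c.Init(); err != nil {
		panic(err)
	}
	defer c.Close()

	plain := bytes.Repeat([]byte("frostfs"), 4096) // highly compressible payload

	// Compress emits a standard zstd frame, so Decompress can later
	// recognize compressed blobs by their magic bytes.
	blob := c.Compress(plain)

	// Decompress is safe to call on uncompressed data as well: without the
	// frame magic it returns the input untouched.
	restored, err := c.Decompress(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(plain), len(blob), bytes.Equal(plain, restored))
}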
-type Level string - -const ( - LevelDefault Level = "" - LevelOptimal Level = "optimal" - LevelFastest Level = "fastest" - LevelSmallestSize Level = "smallest_size" -) - -type Compressor struct { - Config - - encoder *zstd.Encoder - decoder *zstd.Decoder -} - -// Config represents common compression-related configuration. -type Config struct { - Enabled bool - UncompressableContentTypes []string - Level Level - - EstimateCompressibility bool - EstimateCompressibilityThreshold float64 -} - -// zstdFrameMagic contains first 4 bytes of any compressed object -// https://github.com/klauspost/compress/blob/master/zstd/framedec.go#L58 . -var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - -// Init initializes compression routines. -func (c *Compressor) Init() error { - var err error - - if c.Enabled { - c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel())) - if err != nil { - return err - } - } - - c.decoder, err = zstd.NewReader(nil) - return err -} - -// NeedsCompression returns true if the object should be compressed. -// For an object to be compressed 2 conditions must hold: -// 1. Compression is enabled in settings. -// 2. Object MIME Content-Type is allowed for compression. -func (c *Config) NeedsCompression(obj *objectSDK.Object) bool { - if !c.Enabled || len(c.UncompressableContentTypes) == 0 { - return c.Enabled - } - - for _, attr := range obj.Attributes() { - if attr.Key() == objectSDK.AttributeContentType { - for _, value := range c.UncompressableContentTypes { - match := false - switch { - case len(value) > 0 && value[len(value)-1] == '*': - match = strings.HasPrefix(attr.Value(), value[:len(value)-1]) - case len(value) > 0 && value[0] == '*': - match = strings.HasSuffix(attr.Value(), value[1:]) - default: - match = attr.Value() == value - } - if match { - return false - } - } - } - } - - return c.Enabled -} - -// Decompress decompresses data if it starts with the magic -// and returns data untouched otherwise. -func (c *Compressor) Decompress(data []byte) ([]byte, error) { - if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) { - return data, nil - } - return c.decoder.DecodeAll(data, nil) -} - -// Compress compresses data if compression is enabled -// and returns data untouched otherwise. -func (c *Compressor) Compress(data []byte) []byte { - if c == nil || !c.Enabled { - return data - } - if c.EstimateCompressibility { - estimated := compress.Estimate(data) - if estimated >= c.EstimateCompressibilityThreshold { - return c.compress(data) - } - return data - } - return c.compress(data) -} - -func (c *Compressor) compress(data []byte) []byte { - maxSize := c.encoder.MaxEncodedSize(len(data)) - compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) - if len(data) < len(compressed) { - return data - } - return compressed -} - -// Close closes encoder and decoder, returns any error occurred. 
-func (c *Compressor) Close() error {
- var err error
- if c.encoder != nil {
- err = c.encoder.Close()
- }
- if c.decoder != nil {
- c.decoder.Close()
- }
- return err
-}
-
-func (c *Config) HasValidCompressionLevel() bool {
- return c.Level == LevelDefault ||
- c.Level == LevelOptimal ||
- c.Level == LevelFastest ||
- c.Level == LevelSmallestSize
-}
-
-func (c *Compressor) compressionLevel() zstd.EncoderLevel {
- switch c.Level {
- case LevelDefault, LevelOptimal:
- return zstd.SpeedDefault
- case LevelFastest:
- return zstd.SpeedFastest
- case LevelSmallestSize:
- return zstd.SpeedBestCompression
- default:
- assert.Fail("unknown compression level", string(c.Level))
- return zstd.SpeedDefault
- }
-}
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
deleted file mode 100644
index 0418eedd0..000000000
--- a/pkg/local_object_storage/blobstor/control.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package blobstor
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "go.uber.org/zap"
-)
-
-// Open opens BlobStor.
-func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
- b.log.Debug(ctx, logs.BlobstorOpening)
-
- b.modeMtx.Lock()
- defer b.modeMtx.Unlock()
- b.mode = mode
-
- err := b.openBlobStor(ctx, mode)
- if err != nil {
- return err
- }
- b.metrics.SetMode(mode.ReadOnly())
-
- return nil
-}
-
-func (b *BlobStor) openBlobStor(ctx context.Context, mod mode.Mode) error {
- for i := range b.storage {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- err := b.storage[i].Storage.Open(mode.ConvertToComponentMode(mod))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ErrInitBlobovniczas is returned when blobovnicza initialization fails.
-var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stage")
-
-// Init initializes internal data structures and system resources.
-//
-// If BlobStor is already initialized, no action is taken.
-//
-// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
-func (b *BlobStor) Init(ctx context.Context) error {
- b.log.Debug(ctx, logs.BlobstorInitializing)
-
- if !b.compression.HasValidCompressionLevel() {
- b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
- b.compression.Level = compression.LevelDefault
- }
- if err := b.compression.Init(); err != nil {
- return err
- }
-
- for i := range b.storage {
- err := b.storage[i].Storage.Init()
- if err != nil {
- return fmt.Errorf("%w: %v", ErrInitBlobovniczas, err)
- }
- }
- return nil
-}
-
-// Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close(ctx context.Context) error { - b.log.Debug(ctx, logs.BlobstorClosing) - - var firstErr error - for i := range b.storage { - err := b.storage[i].Storage.Close(ctx) - if err != nil { - b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err)) - if firstErr == nil { - firstErr = err - } - continue - } - } - - err := b.compression.Close() - if firstErr == nil { - firstErr = err - } - if firstErr == nil { - b.metrics.Close() - } - return firstErr -} diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go deleted file mode 100644 index 86d8f15e3..000000000 --- a/pkg/local_object_storage/blobstor/delete.go +++ /dev/null @@ -1,65 +0,0 @@ -package blobstor - -import ( - "context" - "encoding/hex" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - b.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete", - trace.WithAttributes( - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - if prm.StorageID == nil { - for i := range b.storage { - res, err := b.storage[i].Storage.Delete(ctx, prm) - if err == nil || !client.IsErrObjectNotFound(err) { - if err == nil { - success = true - logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) - } - return res, err - } - } - return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - var st common.Storage - - if len(prm.StorageID) == 0 { - st = b.storage[len(b.storage)-1].Storage - } else { - st = b.storage[0].Storage - } - - res, err := st.Delete(ctx, prm) - if err == nil { - success = true - logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) - } - - return res, err -} diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go deleted file mode 100644 index c155e15b8..000000000 --- a/pkg/local_object_storage/blobstor/exists.go +++ /dev/null @@ -1,81 +0,0 @@ -package blobstor - -import ( - "context" - "encoding/hex" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Exists checks if the object is presented in BLOB storage. -// -// Returns any error encountered that did not allow -// to completely check object existence. 
-func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { - var ( - exists = false - startedAt = time.Now() - ) - defer func() { - b.metrics.Exists(time.Since(startedAt), exists, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Exists", - trace.WithAttributes( - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - if prm.StorageID != nil { - if len(prm.StorageID) == 0 { - res, err := b.storage[len(b.storage)-1].Storage.Exists(ctx, prm) - exists = err == nil && res.Exists - return res, err - } - res, err := b.storage[0].Storage.Exists(ctx, prm) - exists = err == nil && res.Exists - return res, err - } - - // If there was an error during existence check below, - // it will be returned unless object was found in blobovnicza. - // Otherwise, it is logged and the latest error is returned. - // FSTree | Blobovnicza | Behaviour - // found | (not tried) | return true, nil - // not found | any result | return the result - // error | found | log the error, return true, nil - // error | not found | return the error - // error | error | log the first error, return the second - var errors []error - for i := range b.storage { - res, err := b.storage[i].Storage.Exists(ctx, prm) - if err == nil && res.Exists { - exists = true - return res, nil - } else if err != nil { - errors = append(errors, err) - } - } - - if len(errors) == 0 { - return common.ExistsRes{}, nil - } - - for _, err := range errors[:len(errors)-1] { - b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, - zap.Stringer("address", prm.Address), - zap.Error(err)) - } - - return common.ExistsRes{}, errors[len(errors)-1] -} diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go deleted file mode 100644 index 7eb7d49bf..000000000 --- a/pkg/local_object_storage/blobstor/exists_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package blobstor - -import ( - "context" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestExists(t *testing.T) { - const smallSizeLimit = 512 - - storages, _, largeFileStorage := defaultTestStorages(t.TempDir(), smallSizeLimit) - - b := New(WithStorages(storages)) - - require.NoError(t, b.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, b.Init(context.Background())) - - objects := []*objectSDK.Object{ - testObject(smallSizeLimit / 2), - testObject(smallSizeLimit + 1), - } - - for i := range objects { - var prm common.PutPrm - prm.Object = objects[i] - _, err := b.Put(context.Background(), prm) - require.NoError(t, err) - } - - var prm common.ExistsPrm - for i := range objects { - prm.Address = objectCore.AddressOf(objects[i]) - - res, err := b.Exists(context.Background(), prm) - require.NoError(t, err) - require.True(t, res.Exists) - } - - prm.Address = 
oidtest.Address()
- res, err := b.Exists(context.Background(), prm)
- require.NoError(t, err)
- require.False(t, res.Exists)
-
- t.Run("corrupt directory", func(t *testing.T) {
- largeFileStorage.SetOption(teststore.WithExists(func(common.ExistsPrm) (common.ExistsRes, error) {
- return common.ExistsRes{}, teststore.ErrDiskExploded
- }))
-
- // Object exists, first error is logged.
- prm.Address = objectCore.AddressOf(objects[0])
- res, err := b.Exists(context.Background(), prm)
- require.NoError(t, err)
- require.True(t, res.Exists)
-
- // Object doesn't exist, first error is returned.
- prm.Address = objectCore.AddressOf(objects[1])
- _, err = b.Exists(context.Background(), prm)
- require.Error(t, err)
- require.ErrorIs(t, err, teststore.ErrDiskExploded)
- })
-}
-
-func testObject(sz uint64) *objectSDK.Object {
- raw := objectSDK.New()
-
- raw.SetID(oidtest.ID())
- raw.SetContainerID(cidtest.ID())
-
- raw.SetPayload(make([]byte, sz))
-
- // fit the binary size to the required one
- data, _ := raw.Marshal()
- if ln := uint64(len(data)); ln > sz {
- raw.SetPayload(raw.Payload()[:sz-(ln-sz)])
- }
-
- return raw
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go
deleted file mode 100644
index 2544729f7..000000000
--- a/pkg/local_object_storage/blobstor/fstree/control.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package fstree
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-)
-
-// Open implements common.Storage.
-func (t *FSTree) Open(mode mode.ComponentMode) error {
- t.readOnly = mode.ReadOnly()
- t.metrics.SetMode(mode)
- return nil
-}
-
-// Init implements common.Storage.
-func (t *FSTree) Init() error {
- if err := util.MkdirAllX(t.RootPath, t.Permissions); err != nil {
- return err
- }
- if !t.readOnly {
- f := newSpecificWriteData(t.fileCounter, t.RootPath, t.Permissions, t.noSync)
- if f != nil {
- t.writer = f
- }
- }
-
- return t.initFileCounter()
-}
-
-// Close implements common.Storage.
-func (t *FSTree) Close(_ context.Context) error {
- t.metrics.Close()
- return nil
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
deleted file mode 100644
index 3caee7ee1..000000000
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package fstree
-
-import (
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-)
-
-// FileCounter is used to count files in FSTree. The implementation must be thread-safe.
-type FileCounter interface {
- Set(count, size uint64)
- Inc(size uint64)
- Dec(size uint64)
-}
-
-type noopCounter struct{}
-
-func (c *noopCounter) Set(uint64, uint64) {}
-func (c *noopCounter) Inc(uint64) {}
-func (c *noopCounter) Dec(uint64) {}
-
-func counterEnabled(c FileCounter) bool {
- _, noop := c.(*noopCounter)
- return !noop
-}
-
-type SimpleCounter struct {
- mtx sync.RWMutex
- count uint64
- size uint64
-}
-
-func NewSimpleCounter() *SimpleCounter {
- return &SimpleCounter{}
-}
-
-func (c *SimpleCounter) Set(count, size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- c.count = count
- c.size = size
-}
-
-func (c *SimpleCounter) Inc(size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- c.count++
- c.size += size
-}
-
-func (c *SimpleCounter) Dec(size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
- c.count--
-
- assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
- c.size -= size
-}
-
-func (c *SimpleCounter) CountSize() (uint64, uint64) {
- c.mtx.RLock()
- defer c.mtx.RUnlock()
-
- return c.count, c.size
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
deleted file mode 100644
index 112741ab4..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ /dev/null
@@ -1,619 +0,0 @@
-package fstree
-
-import (
- "context"
- "crypto/sha256"
- "errors"
- "fmt"
- "io/fs"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "syscall"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-)
-
-type keyLock interface {
- Lock(string)
- Unlock(string)
-}
-
-type noopKeyLock struct{}
-
-func (l *noopKeyLock) Lock(string) {}
-func (l *noopKeyLock) Unlock(string) {}
-
-// FSTree represents object storage as a filesystem tree.
-type FSTree struct {
- Info
-
- log *logger.Logger
-
- compressor *compression.Compressor
- Depth uint64
- DirNameLen int
-
- noSync bool
- readOnly bool
- metrics Metrics
-
- fileCounter FileCounter
-
- writer writer
-}
-
-// Info groups the information about file storage.
-type Info struct {
- // Permission bits of the root directory.
- Permissions fs.FileMode
-
- // Full path to the root directory.
- RootPath string
-}
-
-const (
- // DirNameLen is how many bytes are used to group keys into directories.
- DirNameLen = 1 // in bytes
- // MaxDepth is the maximum depth of nested directories.
- MaxDepth = (sha256.Size - 1) / DirNameLen -) - -var _ common.Storage = (*FSTree)(nil) - -func New(opts ...Option) *FSTree { - f := &FSTree{ - Info: Info{ - Permissions: 0o700, - RootPath: "./", - }, - compressor: nil, - Depth: 4, - DirNameLen: DirNameLen, - metrics: &noopMetrics{}, - fileCounter: &noopCounter{}, - log: logger.NewLoggerWrapper(zap.L()), - } - for i := range opts { - opts[i](f) - } - f.writer = newGenericWriteData(f.fileCounter, f.Permissions, f.noSync) - - return f -} - -func stringifyAddress(addr oid.Address) string { - return addr.Object().EncodeToString() + "." + addr.Container().EncodeToString() -} - -func addressFromString(s string) (oid.Address, error) { - before, after, found := strings.Cut(s, ".") - if !found { - return oid.Address{}, errors.New("invalid address") - } - - var obj oid.ID - if err := obj.DecodeString(before); err != nil { - return oid.Address{}, err - } - - var cnr cid.ID - if err := cnr.DecodeString(after); err != nil { - return oid.Address{}, err - } - - var addr oid.Address - addr.SetObject(obj) - addr.SetContainer(cnr) - - return addr, nil -} - -// Iterate iterates over all stored objects. -func (t *FSTree) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { - var ( - err error - startedAt = time.Now() - ) - - defer func() { - t.metrics.Iterate(time.Since(startedAt), err == nil) - }() - - _, span := tracing.StartSpanFromContext(ctx, "FSTree.Iterate", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.Bool("ignore_errors", prm.IgnoreErrors), - )) - defer span.End() - - err = t.iterate(ctx, 0, []string{t.RootPath}, prm) - return common.IterateRes{}, err -} - -func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, prm common.IteratePrm) error { - curName := strings.Join(curPath[1:], "") - dirPath := filepath.Join(curPath...) - des, err := os.ReadDir(dirPath) - if err != nil { - if prm.IgnoreErrors { - t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.Error(err), - zap.String("directory_path", dirPath)) - return nil - } - return err - } - - isLast := depth >= t.Depth - l := len(curPath) - curPath = append(curPath, "") - - for i := range des { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - curPath[l] = des[i].Name() - - if !isLast && des[i].IsDir() { - err := t.iterate(ctx, depth+1, curPath, prm) - if err != nil { - // Must be error from handler in case errors are ignored. - // Need to report. - return err - } - } - - if depth != t.Depth { - continue - } - - addr, err := addressFromString(curName + des[i].Name()) - if err != nil { - continue - } - path := filepath.Join(curPath...) 
- data, err := os.ReadFile(path) - if err != nil && os.IsNotExist(err) { - continue - } - - if err == nil { - data, err = t.compressor.Decompress(data) - } - if err != nil { - if prm.IgnoreErrors { - t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.Stringer("address", addr), - zap.Error(err), - zap.String("path", path)) - continue - } - return err - } - - err = prm.Handler(common.IterationElement{ - Address: addr, - ObjectData: data, - StorageID: []byte{}, - }) - if err != nil { - return err - } - } - - return nil -} - -type ObjectInfo struct { - Address oid.Address - DataSize uint64 -} -type IterateInfoHandler func(ObjectInfo) error - -func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error { - var ( - err error - startedAt = time.Now() - ) - defer func() { - t.metrics.IterateInfo(time.Since(startedAt), err == nil) - }() - _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo") - defer span.End() - - return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler) -} - -func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error { - curName := strings.Join(curPath[1:], "") - dirPath := filepath.Join(curPath...) - entries, err := os.ReadDir(dirPath) - if err != nil { - return fmt.Errorf("read fstree dir '%s': %w", dirPath, err) - } - - isLast := depth >= t.Depth - l := len(curPath) - curPath = append(curPath, "") - - for i := range entries { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - curPath[l] = entries[i].Name() - - if !isLast && entries[i].IsDir() { - err := t.iterateInfo(ctx, depth+1, curPath, handler) - if err != nil { - return err - } - } - - if depth != t.Depth { - continue - } - - addr, err := addressFromString(curName + entries[i].Name()) - if err != nil { - continue - } - info, err := entries[i].Info() - if err != nil { - if os.IsNotExist(err) { - continue - } - return err - } - - err = handler(ObjectInfo{ - Address: addr, - DataSize: uint64(info.Size()), - }) - if err != nil { - return err - } - } - - return nil -} - -func (t *FSTree) treePath(addr oid.Address) string { - sAddr := stringifyAddress(addr) - - var sb strings.Builder - sb.Grow(len(t.RootPath) + len(sAddr) + int(t.Depth) + 1) - sb.WriteString(t.RootPath) - - for i := 0; uint64(i) < t.Depth; i++ { - sb.WriteRune(filepath.Separator) - sb.WriteString(sAddr[:t.DirNameLen]) - sAddr = sAddr[t.DirNameLen:] - } - - sb.WriteRune(filepath.Separator) - sb.WriteString(sAddr) - return sb.String() -} - -// Delete removes the object with the specified address from the storage. -func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { - var ( - err error - startedAt = time.Now() - ) - defer func() { - t.metrics.Delete(time.Since(startedAt), err == nil) - }() - - _, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.String("address", prm.Address.EncodeToString()), - )) - defer span.End() - - if t.readOnly { - err = common.ErrReadOnly - return common.DeleteRes{}, err - } - - p := t.treePath(prm.Address) - err = t.writer.removeFile(p, prm.Size) - return common.DeleteRes{}, err -} - -// Exists reports whether an object with the specified address is present in the storage.
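treePath above shards the flat address namespace into Depth levels of DirNameLen-byte directory names. A standalone sketch of the same layout logic (the helper is hypothetical and the address is shortened for readability); the Exists implementation continues below.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// treePath mirrors the sharding above: the address string "<objectID>.<containerID>"
// is peeled into depth directories of dirNameLen characters each.
func treePath(root, sAddr string, depth, dirNameLen int) string {
	var sb strings.Builder
	sb.WriteString(root)
	for i := 0; i < depth; i++ {
		sb.WriteRune(filepath.Separator)
		sb.WriteString(sAddr[:dirNameLen])
		sAddr = sAddr[dirNameLen:]
	}
	sb.WriteRune(filepath.Separator)
	sb.WriteString(sAddr)
	return sb.String()
}

func main() {
	// With the defaults above (Depth=4, DirNameLen=1), a fake address
	// "Gv9XfMvTn.6CB3C" lands four directories deep:
	fmt.Println(treePath("/storage", "Gv9XfMvTn.6CB3C", 4, 1))
	// Output: /storage/G/v/9/X/fMvTn.6CB3C
}
```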
-func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { - var ( - success = false - startedAt = time.Now() - ) - defer func() { - t.metrics.Exists(time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "FSTree.Exists", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.String("address", prm.Address.EncodeToString()), - )) - defer span.End() - - p := t.treePath(prm.Address) - - _, err := os.Stat(p) - found := err == nil - if os.IsNotExist(err) { - err = nil - } - success = err == nil - return common.ExistsRes{Exists: found}, err -} - -// Put puts an object in the storage. -func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { - var ( - size int - startedAt = time.Now() - err error - ) - defer func() { - t.metrics.Put(time.Since(startedAt), size, err == nil) - }() - - _, span := tracing.StartSpanFromContext(ctx, "FSTree.Put", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.String("address", prm.Address.EncodeToString()), - attribute.Bool("dont_compress", prm.DontCompress), - )) - defer span.End() - - if t.readOnly { - err = common.ErrReadOnly - return common.PutRes{}, err - } - - p := t.treePath(prm.Address) - - if err = util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil { - if errors.Is(err, syscall.ENOSPC) { - err = common.ErrNoSpace - return common.PutRes{}, err - } - return common.PutRes{}, err - } - if !prm.DontCompress { - prm.RawData = t.compressor.Compress(prm.RawData) - } - - size = len(prm.RawData) - return common.PutRes{StorageID: []byte{}}, t.writer.writeData(p, prm.RawData) -} - -// Get returns an object from the storage by address. -func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) { - var ( - startedAt = time.Now() - success = false - size = 0 - ) - defer func() { - t.metrics.Get(time.Since(startedAt), size, success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.Get", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.Bool("raw", prm.Raw), - attribute.String("address", prm.Address.EncodeToString()), - )) - defer span.End() - - p := t.treePath(prm.Address) - - var data []byte - var err error - { - _, span := tracing.StartSpanFromContext(ctx, "FSTree.Get.ReadFile") - defer span.End() - - data, err = os.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - return common.GetRes{}, err - } - } - - data, err = t.compressor.Decompress(data) - if err != nil { - return common.GetRes{}, err - } - size = len(data) - - obj := objectSDK.New() - if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, err - } - success = true - return common.GetRes{Object: obj, RawData: data}, nil -} - -// GetRange implements common.Storage. 
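The Open/Init/Put/Exists surface above composes into a small write path. A hedged usage sketch follows (the path is arbitrary, and DontCompress is set because New() leaves the compressor nil until BlobStor injects one via SetCompressor); the GetRange implementation continues below.

```go
package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
)

func main() {
	st := fstree.New(
		fstree.WithPath("/tmp/fstree-demo"),
		fstree.WithDepth(2),
		fstree.WithDirNameLen(2),
	)
	if err := st.Open(mode.ComponentReadWrite); err != nil {
		panic(err)
	}
	if err := st.Init(); err != nil {
		panic(err)
	}
	defer st.Close(context.Background())

	addr := oidtest.Address()
	_, err := st.Put(context.Background(), common.PutPrm{
		Address:      addr,
		RawData:      []byte("marshaled object bytes would go here"),
		DontCompress: true, // skip the (not yet injected) compressor
	})
	if err != nil {
		panic(err)
	}

	res, err := st.Exists(context.Background(), common.ExistsPrm{Address: addr})
	fmt.Println(res.Exists, err) // true <nil>
}
```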
-func (t *FSTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { - var ( - startedAt = time.Now() - success = false - size = 0 - ) - defer func() { - t.metrics.GetRange(time.Since(startedAt), size, success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.GetRange", - trace.WithAttributes( - attribute.String("path", t.RootPath), - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)), - attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)), - )) - defer span.End() - - res, err := t.Get(ctx, common.GetPrm{Address: prm.Address}) - if err != nil { - return common.GetRangeRes{}, err - } - - payload := res.Object.Payload() - from := prm.Range.GetOffset() - to := from + prm.Range.GetLength() - - if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { - return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) - } - - success = true - data := payload[from:to] - size = len(data) - return common.GetRangeRes{ - Data: data, - }, nil -} - -// initFileCounter walks the file tree rooted at the FSTree's root, -// counts the number and total size of stored files, and initializes the counter with them. -func (t *FSTree) initFileCounter() error { - if !counterEnabled(t.fileCounter) { - return nil - } - - count, size, err := t.countFiles() - if err != nil { - return err - } - t.fileCounter.Set(count, size) - return nil -} - -func (t *FSTree) countFiles() (uint64, uint64, error) { - var count, size uint64 - // it is simpler to just consider every file - // that is not a directory as an object - err := filepath.WalkDir(t.RootPath, - func(_ string, d fs.DirEntry, _ error) error { - if d.IsDir() { - return nil - } - count++ - info, err := d.Info() - if err != nil { - return err - } - size += uint64(info.Size()) - - return nil - }, - ) - if err != nil { - return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) - } - - return count, size, nil -} - -func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.ObjectsCount(time.Since(startedAt), success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.ObjectsCount", - trace.WithAttributes( - attribute.String("path", t.RootPath), - )) - defer span.End() - - var result uint64 - - err := filepath.WalkDir(t.RootPath, - func(_ string, d fs.DirEntry, _ error) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if !d.IsDir() { - result++ - } - - return nil - }, - ) - if err != nil { - return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) - } - success = true - return result, nil -} - -// Type is the fstree storage type used in logs and configuration. -const Type = "fstree" - -// Type implements common.Storage. -func (*FSTree) Type() string { - return Type -} - -// Path implements common.Storage. -func (t *FSTree) Path() string { - return t.RootPath -} - -// SetCompressor implements common.Storage. -func (t *FSTree) SetCompressor(cc *compression.Compressor) { - t.compressor = cc -} - -func (t *FSTree) Compressor() *compression.Compressor { - return t.compressor -} - -// SetReportErrorFunc implements common.Storage. -func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) { - // Do nothing, FSTree can encounter only one error which is returned.
-} - -func (t *FSTree) SetParentID(parentID string) { - t.metrics.SetParentID(parentID) -} - -func (t *FSTree) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) { - return common.RebuildRes{}, nil -} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go deleted file mode 100644 index 50dae46a7..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package fstree - -import ( - "context" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestAddressToString(t *testing.T) { - addr := oidtest.Address() - s := stringifyAddress(addr) - actual, err := addressFromString(s) - require.NoError(t, err) - require.Equal(t, addr, actual) -} - -func Benchmark_addressFromString(b *testing.B) { - addr := oidtest.Address() - s := stringifyAddress(addr) - - b.ReportAllocs() - b.ResetTimer() - for range b.N { - _, err := addressFromString(s) - if err != nil { - b.Fatalf("benchmark error: %v", err) - } - } -} - -func TestObjectCounter(t *testing.T) { - t.Parallel() - counter := NewSimpleCounter() - fst := New( - WithPath(t.TempDir()), - WithDepth(2), - WithDirNameLen(2), - WithFileCounter(counter)) - require.NoError(t, fst.Open(mode.ComponentReadWrite)) - require.NoError(t, fst.Init()) - - count, size := counter.CountSize() - require.Equal(t, uint64(0), count) - require.Equal(t, uint64(0), size) - - defer func() { - require.NoError(t, fst.Close(context.Background())) - }() - - addr := oidtest.Address() - obj := objectSDK.New() - obj.SetID(addr.Object()) - obj.SetContainerID(addr.Container()) - obj.SetPayload([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}) - - var putPrm common.PutPrm - putPrm.Address = addr - putPrm.RawData, _ = obj.Marshal() - - var delPrm common.DeletePrm - delPrm.Address = addr - - t.Run("without size hint", func(t *testing.T) { - eg, egCtx := errgroup.WithContext(context.Background()) - - eg.Go(func() error { - for range 1_000 { - _, err := fst.Put(egCtx, putPrm) - if err != nil { - return err - } - } - return nil - }) - - eg.Go(func() error { - var le logicerr.Logical - for range 1_000 { - _, err := fst.Delete(egCtx, delPrm) - if err != nil && !errors.As(err, &le) { - return err - } - } - return nil - }) - - require.NoError(t, eg.Wait()) - - count, size = counter.CountSize() - realCount, realSize, err := fst.countFiles() - require.NoError(t, err) - require.Equal(t, realCount, count, "real %d, actual %d", realCount, count) - require.Equal(t, realSize, size, "real %d, actual %d", realSize, size) - }) - - t.Run("with size hint", func(t *testing.T) { - delPrm.Size = uint64(len(putPrm.RawData)) - eg, egCtx := errgroup.WithContext(context.Background()) - - eg.Go(func() error { - for range 1_000 { - _, err := fst.Put(egCtx, putPrm) - if err != nil { - return err - } - } - return nil - }) - - eg.Go(func() error { - var le logicerr.Logical - for range 1_000 { - _, err := fst.Delete(egCtx, delPrm) - if err != nil && !errors.As(err, &le) { - return err - } - } - return nil - }) - - require.NoError(t, eg.Wait()) - - count, 
size = counter.CountSize() - realCount, realSize, err := fst.countFiles() - require.NoError(t, err) - require.Equal(t, realCount, count, "real %d, actual %d", realCount, count) - require.Equal(t, realSize, size, "real %d, actual %d", realSize, size) - }) -} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go deleted file mode 100644 index 6d633dad6..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ /dev/null @@ -1,138 +0,0 @@ -package fstree - -import ( - "errors" - "io/fs" - "os" - "strconv" - "sync/atomic" - "syscall" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -type writer interface { - writeData(string, []byte) error - removeFile(string, uint64) error -} - -type genericWriter struct { - perm fs.FileMode - flags int - - fileGuard keyLock - fileCounter FileCounter - fileCounterEnabled bool - suffix atomic.Uint64 -} - -func newGenericWriteData(c FileCounter, perm fs.FileMode, noSync bool) writer { - flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_EXCL - if !noSync { - flags |= os.O_SYNC - } - - var fileGuard keyLock = &noopKeyLock{} - fileCounterEnabled := counterEnabled(c) - if fileCounterEnabled { - fileGuard = utilSync.NewKeyLocker[string]() - } - - w := &genericWriter{ - perm: perm, - flags: flags, - - fileCounterEnabled: fileCounterEnabled, - fileGuard: fileGuard, - fileCounter: c, - } - return w -} - -func (w *genericWriter) writeData(p string, data []byte) error { - tmpPath := p + "#" + strconv.FormatUint(w.suffix.Add(1), 10) - return w.writeAndRename(tmpPath, p, data) -} - -// writeAndRename opens tmpPath exclusively, writes data to it and renames it to p. -func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error { - if w.fileCounterEnabled { - w.fileGuard.Lock(p) - defer w.fileGuard.Unlock(p) - } - - err := w.writeFile(tmpPath, data) - if err != nil { - var pe *fs.PathError - if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) { - err = common.ErrNoSpace - _ = os.RemoveAll(tmpPath) - } - return err - } - - if w.fileCounterEnabled { - w.fileCounter.Inc(uint64(len(data))) - var targetFileExists bool - if _, e := os.Stat(p); e == nil { - targetFileExists = true - } - err = os.Rename(tmpPath, p) - if err == nil && targetFileExists { - w.fileCounter.Dec(uint64(len(data))) - } - } else { - err = os.Rename(tmpPath, p) - } - return err -} - -// writeFile writes data to a file with path p. -// The code is copied from `os.WriteFile` with minor corrections for flags. 
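The temp-name-plus-rename dance in writeAndRename above is the classic atomic-publish pattern: the object appears under its final name only after all bytes are written. A generic standalone sketch (not the FSTree code itself); the Linux writer later in this diff achieves the same effect with O_TMPFILE and linkat(2), skipping the visible temp name entirely.

```go
package main

import (
	"os"
	"path/filepath"
	"strconv"
	"sync/atomic"
)

var suffix atomic.Uint64 // unique temp suffix, like genericWriter.suffix above

// writeAtomically writes data next to the target and renames it into place,
// so concurrent readers never observe a partially written file.
func writeAtomically(p string, data []byte, perm os.FileMode) error {
	tmp := p + "#" + strconv.FormatUint(suffix.Add(1), 10)
	f, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_EXCL|os.O_SYNC, perm)
	if err != nil {
		return err
	}
	if _, err = f.Write(data); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err = f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, p) // atomic on POSIX filesystems
}

func main() {
	if err := writeAtomically(filepath.Join(os.TempDir(), "obj.bin"), []byte("payload"), 0o640); err != nil {
		panic(err)
	}
}
```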
-func (w *genericWriter) writeFile(p string, data []byte) error { - f, err := os.OpenFile(p, w.flags, w.perm) - if err != nil { - return err - } - _, err = f.Write(data) - if err1 := f.Close(); err1 != nil && err == nil { - err = err1 - } - return err -} - -func (w *genericWriter) removeFile(p string, size uint64) error { - var err error - if w.fileCounterEnabled { - err = w.removeWithCounter(p, size) - } else { - err = os.Remove(p) - } - - if err != nil && os.IsNotExist(err) { - err = logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - return err -} - -func (w *genericWriter) removeWithCounter(p string, size uint64) error { - w.fileGuard.Lock(p) - defer w.fileGuard.Unlock(p) - - if size == 0 { - stat, err := os.Stat(p) - if err != nil { - return err - } - size = uint64(stat.Size()) - } - - if err := os.Remove(p); err != nil { - return err - } - w.fileCounter.Dec(size) - return nil -} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go deleted file mode 100644 index 49cbda344..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go +++ /dev/null @@ -1,137 +0,0 @@ -//go:build linux && !fstree_generic - -package fstree - -import ( - "errors" - "io/fs" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "golang.org/x/sys/unix" -) - -type linuxWriter struct { - root string - perm uint32 - flags int - - fileGuard keyLock - fileCounter FileCounter - fileCounterEnabled bool -} - -func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer { - flags := unix.O_WRONLY | unix.O_TMPFILE | unix.O_CLOEXEC - if !noSync { - flags |= unix.O_DSYNC - } - fd, err := unix.Open(root, flags, uint32(perm)) - if err != nil { - // Which means that OS-specific writeData can't be created - // and FSTree should use the generic one. - return nil - } - _ = unix.Close(fd) // Don't care about error. - var fileGuard keyLock = &noopKeyLock{} - fileCounterEnabled := counterEnabled(c) - if fileCounterEnabled { - fileGuard = utilSync.NewKeyLocker[string]() - } - w := &linuxWriter{ - root: root, - perm: uint32(perm), - flags: flags, - fileGuard: fileGuard, - fileCounter: c, - fileCounterEnabled: fileCounterEnabled, - } - return w -} - -func (w *linuxWriter) writeData(p string, data []byte) error { - err := w.writeFile(p, data) - if errors.Is(err, unix.ENOSPC) { - return common.ErrNoSpace - } - return err -} - -func (w *linuxWriter) writeFile(p string, data []byte) error { - if w.fileCounterEnabled { - w.fileGuard.Lock(p) - defer w.fileGuard.Unlock(p) - } - fd, err := unix.Open(w.root, w.flags, w.perm) - if err != nil { - return err - } - written := 0 - tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10) - n, err := unix.Write(fd, data) - for err == nil { - written += n - - if written == len(data) { - err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW) - if err == nil { - w.fileCounter.Inc(uint64(len(data))) - } - if errors.Is(err, unix.EEXIST) { - err = nil - } - break - } - - // From man 2 write: - // https://www.man7.org/linux/man-pages/man2/write.2.html - // - // Note that a successful write() may transfer fewer than count - // bytes. 
Such partial writes can occur for various reasons; for - // example, because there was insufficient space on the disk device - // to write all of the requested bytes, or because a blocked write() - // to a socket, pipe, or similar was interrupted by a signal handler - // after it had transferred some, but before it had transferred all - // of the requested bytes. In the event of a partial write, the - // caller can make another write() call to transfer the remaining - // bytes. The subsequent call will either transfer further bytes or - // may result in an error (e.g., if the disk is now full). - n, err = unix.Write(fd, data[written:]) - } - errClose := unix.Close(fd) - if err != nil { - return err // Close() error is ignored, we have a better one. - } - return errClose -} - -func (w *linuxWriter) removeFile(p string, size uint64) error { - if w.fileCounterEnabled { - w.fileGuard.Lock(p) - defer w.fileGuard.Unlock(p) - - if size == 0 { - var stat unix.Stat_t - err := unix.Stat(p, &stat) - if err != nil { - if err == unix.ENOENT { - return logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - return err - } - size = uint64(stat.Size) - } - } - - err := unix.Unlink(p) - if err != nil && err == unix.ENOENT { - return logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - if err == nil { - w.fileCounter.Dec(size) - } - return err -} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go deleted file mode 100644 index 7fae2e695..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build linux && integration - -package fstree - -import ( - "context" - "errors" - "os" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" -) - -func TestENOSPC(t *testing.T) { - dir, err := os.MkdirTemp(t.TempDir(), "ramdisk") - require.NoError(t, err) - - f, err := os.CreateTemp(t.TempDir(), "ramdisk_*") - require.NoError(t, err) - - err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M") - if errors.Is(err, unix.EPERM) { - t.Skipf("skip size tests: no permission to mount: %v", err) - return - } - require.NoError(t, err) - defer func() { - require.NoError(t, unix.Unmount(dir, 0)) - }() - - fst := New(WithPath(dir), WithDepth(1)) - require.NoError(t, fst.Open(mode.ComponentReadWrite)) - require.NoError(t, fst.Init()) - - _, err = fst.Put(context.Background(), common.PutPrm{ - RawData: make([]byte, 10<<20), - }) - require.ErrorIs(t, err, common.ErrNoSpace) -} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go deleted file mode 100644 index 67052d947..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux || fstree_generic - -package fstree - -import ( - "io/fs" -) - -func newSpecificWriteData(_ FileCounter, _ string, _ fs.FileMode, _ bool) writer { - return nil -} diff --git a/pkg/local_object_storage/blobstor/fstree/generic_test.go b/pkg/local_object_storage/blobstor/fstree/generic_test.go deleted file mode 100644 index 757482c78..000000000 --- a/pkg/local_object_storage/blobstor/fstree/generic_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package fstree - -import ( - "testing" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" -) - -func TestGeneric(t *testing.T) { - newTreeFromPath := func(path string) common.Storage { - return New( - WithPath(path), - WithDepth(2), - WithDirNameLen(2)) - } - - newTree := func(t *testing.T) common.Storage { - return newTreeFromPath(t.TempDir()) - } - - blobstortest.TestAll(t, newTree, 2048, 16*1024) - - t.Run("info", func(t *testing.T) { - path := t.TempDir() - blobstortest.TestInfo(t, func(*testing.T) common.Storage { - return newTreeFromPath(path) - }, Type, path) - }) -} - -func TestControl(t *testing.T) { - newTree := func(t *testing.T) common.Storage { - return New( - WithPath(t.TempDir()), - WithDepth(2), - WithDirNameLen(2)) - } - - blobstortest.TestControl(t, newTree, 2048, 2048) -} diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go deleted file mode 100644 index 4241beec9..000000000 --- a/pkg/local_object_storage/blobstor/fstree/metrics.go +++ /dev/null @@ -1,37 +0,0 @@ -package fstree - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -type Metrics interface { - SetParentID(parentID string) - - SetMode(mode mode.ComponentMode) - Close() - - Iterate(d time.Duration, success bool) - IterateInfo(d time.Duration, success bool) - Delete(d time.Duration, success bool) - Exists(d time.Duration, success bool) - Put(d time.Duration, size int, success bool) - Get(d time.Duration, size int, success bool) - GetRange(d time.Duration, size int, success bool) - ObjectsCount(d time.Duration, success bool) -} - -type noopMetrics struct{} - -func (m *noopMetrics) SetParentID(string) {} -func (m *noopMetrics) SetMode(mode.ComponentMode) {} -func (m *noopMetrics) Close() {} -func (m *noopMetrics) Iterate(time.Duration, bool) {} -func (m *noopMetrics) IterateInfo(time.Duration, bool) {} -func (m *noopMetrics) Delete(time.Duration, bool) {} -func (m *noopMetrics) Exists(time.Duration, bool) {} -func (m *noopMetrics) Put(time.Duration, int, bool) {} -func (m *noopMetrics) Get(time.Duration, int, bool) {} -func (m *noopMetrics) GetRange(time.Duration, int, bool) {} -func (m *noopMetrics) ObjectsCount(time.Duration, bool) {} diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go deleted file mode 100644 index 6f2ac87e1..000000000 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ /dev/null @@ -1,57 +0,0 @@ -package fstree - -import ( - "io/fs" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -type Option func(*FSTree) - -func WithDepth(d uint64) Option { - return func(f *FSTree) { - f.Depth = d - } -} - -func WithDirNameLen(l int) Option { - return func(f *FSTree) { - f.DirNameLen = l - } -} - -func WithPerm(p fs.FileMode) Option { - return func(f *FSTree) { - f.Permissions = p - } -} - -func WithPath(p string) Option { - return func(f *FSTree) { - f.RootPath = p - } -} - -func WithNoSync(noSync bool) Option { - return func(f *FSTree) { - f.noSync = noSync - } -} - -func WithMetrics(m Metrics) Option { - return func(f *FSTree) { - f.metrics = m - } -} - -func WithFileCounter(c FileCounter) Option { - return func(f *FSTree) { - f.fileCounter = c - } -} - -func WithLogger(l *logger.Logger) Option { - return func(f *FSTree) { - f.log = l - } -} diff --git 
a/pkg/local_object_storage/blobstor/generic_test.go b/pkg/local_object_storage/blobstor/generic_test.go deleted file mode 100644 index b58ab8a68..000000000 --- a/pkg/local_object_storage/blobstor/generic_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package blobstor - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest" -) - -func TestGeneric(t *testing.T) { - newBlobStor := func(t *testing.T) storagetest.Component { - return New( - WithStorages(defaultStorages(t.TempDir(), 128))) - } - - storagetest.TestAll(t, newBlobStor) -} diff --git a/pkg/local_object_storage/blobstor/get.go b/pkg/local_object_storage/blobstor/get.go deleted file mode 100644 index d00ef2f21..000000000 --- a/pkg/local_object_storage/blobstor/get.go +++ /dev/null @@ -1,53 +0,0 @@ -package blobstor - -import ( - "context" - "encoding/hex" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Get reads the object from b. -// If the descriptor is present, only one sub-storage is tried. -// Otherwise, each sub-storage is tried in order. -func (b *BlobStor) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) { - startedAt := time.Now() - defer func() { - b.metrics.Get(time.Since(startedAt), len(res.RawData), err == nil, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Get", - trace.WithAttributes( - attribute.String("address", prm.Address.EncodeToString()), - attribute.Bool("raw", prm.Raw), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - if prm.StorageID == nil { - for i := range b.storage { - res, err = b.storage[i].Storage.Get(ctx, prm) - if err == nil || !client.IsErrObjectNotFound(err) { - return res, err - } - } - - return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - if len(prm.StorageID) == 0 { - res, err = b.storage[len(b.storage)-1].Storage.Get(ctx, prm) - } else { - res, err = b.storage[0].Storage.Get(ctx, prm) - } - return res, err -} diff --git a/pkg/local_object_storage/blobstor/get_range.go b/pkg/local_object_storage/blobstor/get_range.go deleted file mode 100644 index 9bded4720..000000000 --- a/pkg/local_object_storage/blobstor/get_range.go +++ /dev/null @@ -1,55 +0,0 @@ -package blobstor - -import ( - "context" - "encoding/hex" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// GetRange reads object payload data from b. -// If the descriptor is present, only one sub-storage is tried. -// Otherwise, each sub-storage is tried in order.
-func (b *BlobStor) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) { - startedAt := time.Now() - defer func() { - b.metrics.GetRange(time.Since(startedAt), len(res.Data), err == nil, prm.StorageID != nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.GetRange", - trace.WithAttributes( - attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), - attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)), - attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - if prm.StorageID == nil { - for i := range b.storage { - res, err = b.storage[i].Storage.GetRange(ctx, prm) - if err == nil || !client.IsErrObjectNotFound(err) { - return res, err - } - } - - return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - if len(prm.StorageID) == 0 { - res, err = b.storage[len(b.storage)-1].Storage.GetRange(ctx, prm) - } else { - res, err = b.storage[0].Storage.GetRange(ctx, prm) - } - return res, err -} diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go deleted file mode 100644 index c1c47f3bb..000000000 --- a/pkg/local_object_storage/blobstor/info.go +++ /dev/null @@ -1,61 +0,0 @@ -package blobstor - -import ( - "context" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "golang.org/x/sync/errgroup" -) - -// DumpInfo returns information about blob stor. -func (b *BlobStor) DumpInfo() Info { - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - sub := make([]SubStorageInfo, len(b.storage)) - for i := range b.storage { - sub[i].Path = b.storage[i].Storage.Path() - sub[i].Type = b.storage[i].Storage.Type() - } - - return Info{ - SubStorages: sub, - } -} - -// ObjectsCount returns Blobstore's total objects count. 
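Get and GetRange above share an unusual three-way convention on the StorageID descriptor that distinguishes a nil slice from an empty one. My reading of it, condensed into a hypothetical sketch (not a documented contract); BlobStor's ObjectsCount follows.

```go
package main

import "fmt"

// pickSubStorage mirrors the dispatch in BlobStor.Get/GetRange:
//
//	StorageID == nil     -> probe every sub-storage in order
//	len(StorageID) == 0  -> only the last sub-storage (FSTree in the default setup)
//	len(StorageID) > 0   -> only the first sub-storage (e.g. a blobovnicza ID)
func pickSubStorage(storageID []byte, total int) (probeAll bool, idx int) {
	switch {
	case storageID == nil:
		return true, 0
	case len(storageID) == 0:
		return false, total - 1
	default:
		return false, 0
	}
}

func main() {
	fmt.Println(pickSubStorage(nil, 2))                      // true 0
	fmt.Println(pickSubStorage([]byte{}, 2))                 // false 1
	fmt.Println(pickSubStorage([]byte("blobovnicza.db"), 2)) // false 0
}
```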
-func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) { - var err error - startedAt := time.Now() - defer func() { - b.metrics.ObjectsCount(time.Since(startedAt), err == nil) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.ObjectsCount") - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - var result atomic.Uint64 - - eg, egCtx := errgroup.WithContext(ctx) - for i := range b.storage { - eg.Go(func() error { - v, e := b.storage[i].Storage.ObjectsCount(egCtx) - if e != nil { - return e - } - result.Add(v) - return nil - }) - } - - if err = eg.Wait(); err != nil { - return 0, err - } - - return result.Load(), nil -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go deleted file mode 100644 index 5d14a9a3a..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go +++ /dev/null @@ -1,101 +0,0 @@ -package blobstortest - -import ( - "context" - mrand "math/rand" - "testing" - "time" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -// Constructor constructs blobstor component. -// Each call must create a component using different file-system path. -type Constructor = func(t *testing.T) common.Storage - -// objectDesc is a helper structure to avoid multiple `Marshal` invokes during tests. -type objectDesc struct { - obj *objectSDK.Object - addr oid.Address - raw []byte - storageID []byte -} - -func TestAll(t *testing.T, cons Constructor, minSize, maxSize uint64) { - t.Run("get", func(t *testing.T) { - TestGet(t, cons, minSize, maxSize) - }) - t.Run("get range", func(t *testing.T) { - TestGetRange(t, cons, minSize, maxSize) - }) - t.Run("delete", func(t *testing.T) { - TestDelete(t, cons, minSize, maxSize) - }) - t.Run("exists", func(t *testing.T) { - TestExists(t, cons, minSize, maxSize) - }) - t.Run("iterate", func(t *testing.T) { - TestIterate(t, cons, minSize, maxSize) - }) -} - -func TestInfo(t *testing.T, cons Constructor, expectedType string, expectedPath string) { - s := cons(t) - require.Equal(t, expectedType, s.Type()) - require.Equal(t, expectedPath, s.Path()) -} - -func prepare(t *testing.T, count int, s common.Storage, minSize, maxSize uint64) []objectDesc { - objects := make([]objectDesc, count) - - r := mrand.New(mrand.NewSource(0)) - for i := range objects { - objects[i].obj = NewObject(minSize + uint64(r.Intn(int(maxSize-minSize+1)))) // not too large - objects[i].addr = objectCore.AddressOf(objects[i].obj) - - raw, err := objects[i].obj.Marshal() - require.NoError(t, err) - objects[i].raw = raw - } - - for i := range objects { - var prm common.PutPrm - prm.Address = objects[i].addr - prm.Object = objects[i].obj - prm.RawData = objects[i].raw - - putRes, err := s.Put(context.Background(), prm) - require.NoError(t, err) - - objects[i].storageID = putRes.StorageID - } - - return objects -} - -// NewObject creates a regular object of specified size with a random payload. 
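BlobStor.ObjectsCount above fans out one goroutine per sub-storage and folds the results into an atomic counter; errgroup cancels the remaining workers on first failure. The same skeleton, reduced to a runnable sketch with fake per-storage results; the NewObject helper follows.

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

func main() {
	perStorage := []uint64{3, 5, 7} // pretend results of Storage.ObjectsCount

	var total atomic.Uint64
	eg, _ := errgroup.WithContext(context.Background())
	for i := range perStorage {
		eg.Go(func() error {
			// A real implementation would call b.storage[i].Storage.ObjectsCount(egCtx) here.
			total.Add(perStorage[i])
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(total.Load()) // 15
}
```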
-func NewObject(sz uint64) *objectSDK.Object { - raw := objectSDK.New() - - raw.SetID(oidtest.ID()) - raw.SetContainerID(cidtest.ID()) - - payload := make([]byte, sz) - r := mrand.New(mrand.NewSource(time.Now().UnixNano())) - r.Read(payload) - raw.SetPayload(payload) - - // fit the binary size to the required one - data, _ := raw.Marshal() - if ln := uint64(len(data)); ln > sz { - raw.SetPayload(raw.Payload()[:sz-(ln-sz)]) - } - - return raw -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go deleted file mode 100644 index b8e88f84a..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go +++ /dev/null @@ -1,51 +0,0 @@ -package blobstortest - -import ( - "context" - "math/rand" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -// TestControl checks correctness of a read-only mode. -// cons must return a storage which is NOT opened. -func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - - objects := prepare(t, 10, s, minSize, maxSize) - require.NoError(t, s.Close(context.Background())) - - require.NoError(t, s.Open(mode.ComponentReadOnly)) - for i := range objects { - var prm common.GetPrm - prm.Address = objects[i].addr - prm.StorageID = objects[i].storageID - prm.Raw = true - - _, err := s.Get(context.Background(), prm) - require.NoError(t, err) - } - - t.Run("put fails", func(t *testing.T) { - var prm common.PutPrm - prm.Object = NewObject(minSize + uint64(rand.Intn(int(maxSize-minSize+1)))) - prm.Address = objectCore.AddressOf(prm.Object) - - _, err := s.Put(context.Background(), prm) - require.ErrorIs(t, err, common.ErrReadOnly) - }) - t.Run("delete fails", func(t *testing.T) { - var prm common.DeletePrm - prm.Address = objects[0].addr - prm.StorageID = objects[0].storageID - - _, err := s.Delete(context.Background(), prm) - require.ErrorIs(t, err, common.ErrReadOnly) - }) -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go deleted file mode 100644 index 3a163f6b1..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go +++ /dev/null @@ -1,84 +0,0 @@ -package blobstortest - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - objects := prepare(t, 4, s, minSize, maxSize) - - t.Run("delete non-existent", func(t *testing.T) { - var prm common.DeletePrm - prm.Address = oidtest.Address() - - _, err := s.Delete(context.Background(), prm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - - t.Run("with
storage ID", func(t *testing.T) { - var prm common.DeletePrm - prm.Address = objects[0].addr - prm.StorageID = objects[0].storageID - - _, err := s.Delete(context.Background(), prm) - require.NoError(t, err) - - t.Run("exists fail", func(t *testing.T) { - prm := common.ExistsPrm{Address: oidtest.Address()} - res, err := s.Exists(context.Background(), prm) - require.NoError(t, err) - require.False(t, res.Exists) - }) - t.Run("get fail", func(t *testing.T) { - prm := common.GetPrm{Address: oidtest.Address()} - _, err := s.Get(context.Background(), prm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - t.Run("getrange fail", func(t *testing.T) { - prm := common.GetRangePrm{Address: oidtest.Address()} - _, err := s.GetRange(context.Background(), prm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - }) - t.Run("without storage ID", func(t *testing.T) { - var prm common.DeletePrm - prm.Address = objects[1].addr - - _, err := s.Delete(context.Background(), prm) - require.NoError(t, err) - }) - - t.Run("delete twice", func(t *testing.T) { - var prm common.DeletePrm - prm.Address = objects[2].addr - prm.StorageID = objects[2].storageID - - _, err := s.Delete(context.Background(), prm) - require.NoError(t, err) - - _, err = s.Delete(context.Background(), prm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - - t.Run("non-deleted object is still available", func(t *testing.T) { - var prm common.GetPrm - prm.Address = objects[3].addr - prm.Raw = true - - res, err := s.Get(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, objects[3].raw, res.RawData) - }) -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go deleted file mode 100644 index f34fe5f97..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go +++ /dev/null @@ -1,46 +0,0 @@ -package blobstortest - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - objects := prepare(t, 1, s, minSize, maxSize) - - t.Run("missing object", func(t *testing.T) { - prm := common.ExistsPrm{Address: oidtest.Address()} - res, err := s.Exists(context.Background(), prm) - require.NoError(t, err) - require.False(t, res.Exists) - }) - - var prm common.ExistsPrm - prm.Address = objects[0].addr - - t.Run("without storage ID", func(t *testing.T) { - prm.StorageID = nil - - res, err := s.Exists(context.Background(), prm) - require.NoError(t, err) - require.True(t, res.Exists) - }) - - t.Run("with storage ID", func(t *testing.T) { - prm.StorageID = objects[0].storageID - - res, err := s.Exists(context.Background(), prm) - require.NoError(t, err) - require.True(t, res.Exists) - }) -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go deleted file mode 100644 index af0f4b45d..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go +++ /dev/null @@ -1,52 +0,0 @@ -package blobstortest - 
-import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - objects := prepare(t, 2, s, minSize, maxSize) - - t.Run("missing object", func(t *testing.T) { - gPrm := common.GetPrm{Address: oidtest.Address()} - _, err := s.Get(context.Background(), gPrm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - - for i := range objects { - var gPrm common.GetPrm - gPrm.Address = objects[i].addr - - // With storage ID. - gPrm.StorageID = objects[i].storageID - res, err := s.Get(context.Background(), gPrm) - require.NoError(t, err) - require.Equal(t, objects[i].obj, res.Object) - - // Without storage ID. - gPrm.StorageID = nil - res, err = s.Get(context.Background(), gPrm) - require.NoError(t, err) - require.Equal(t, objects[i].obj, res.Object) - - // With raw flag. - gPrm.StorageID = objects[i].storageID - gPrm.Raw = true - - res, err = s.Get(context.Background(), gPrm) - require.NoError(t, err) - require.Equal(t, objects[i].raw, res.RawData) - } -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go deleted file mode 100644 index 13032048c..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go +++ /dev/null @@ -1,87 +0,0 @@ -package blobstortest - -import ( - "context" - "math" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - objects := prepare(t, 1, s, minSize, maxSize) - - t.Run("missing object", func(t *testing.T) { - gPrm := common.GetRangePrm{Address: oidtest.Address()} - _, err := s.GetRange(context.Background(), gPrm) - require.True(t, client.IsErrObjectNotFound(err)) - }) - - payload := objects[0].obj.Payload() - - var start, stop uint64 = 11, 100 - if uint64(len(payload)) < stop { - t.Fatalf("unexpected: invalid test object generated") - } - - var gPrm common.GetRangePrm - gPrm.Address = objects[0].addr - gPrm.Range.SetOffset(start) - gPrm.Range.SetLength(stop - start) - - t.Run("without storage ID", func(t *testing.T) { - // Without storage ID. 
- res, err := s.GetRange(context.Background(), gPrm) - require.NoError(t, err) - require.Equal(t, payload[start:stop], res.Data) - }) - - t.Run("with storage ID", func(t *testing.T) { - gPrm.StorageID = objects[0].storageID - res, err := s.GetRange(context.Background(), gPrm) - require.NoError(t, err) - require.Equal(t, payload[start:stop], res.Data) - }) - - t.Run("offset > len(payload)", func(t *testing.T) { - gPrm.Range.SetOffset(uint64(len(payload) + 10)) - gPrm.Range.SetLength(10) - - _, err := s.GetRange(context.Background(), gPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange)) - }) - - t.Run("offset + length > len(payload)", func(t *testing.T) { - gPrm.Range.SetOffset(10) - gPrm.Range.SetLength(uint64(len(payload))) - - _, err := s.GetRange(context.Background(), gPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange)) - }) - - t.Run("length is negative when converted to int64", func(t *testing.T) { - gPrm.Range.SetOffset(0) - gPrm.Range.SetLength(1 << 63) - - _, err := s.GetRange(context.Background(), gPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange)) - }) - - t.Run("offset + length overflow uint64", func(t *testing.T) { - gPrm.Range.SetOffset(10) - gPrm.Range.SetLength(math.MaxUint64 - 2) - - _, err := s.GetRange(context.Background(), gPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange)) - }) -} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go deleted file mode 100644 index d54c54f59..000000000 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ /dev/null @@ -1,99 +0,0 @@ -package blobstortest - -import ( - "context" - "errors" - "slices" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { - s := cons(t) - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - objects := prepare(t, 10, s, minSize, maxSize) - - // Delete random object to ensure it is not iterated over. 
- const delID = 2 - var delPrm common.DeletePrm - delPrm.Address = objects[2].addr - delPrm.StorageID = objects[2].storageID - _, err := s.Delete(context.Background(), delPrm) - require.NoError(t, err) - - objects = slices.Delete(objects, delID, delID+1) - - runTestNormalHandler(t, s, objects) - - runTestIgnoreLogicalErrors(t, s, objects) -} - -func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) { - t.Run("normal handler", func(t *testing.T) { - seen := make(map[string]objectDesc) - - var iterPrm common.IteratePrm - iterPrm.Handler = func(elem common.IterationElement) error { - seen[elem.Address.String()] = objectDesc{ - addr: elem.Address, - raw: elem.ObjectData, - storageID: elem.StorageID, - } - return nil - } - - _, err := s.Iterate(context.Background(), iterPrm) - require.NoError(t, err) - require.Len(t, objects, len(seen)) - for i := range objects { - d, ok := seen[objects[i].addr.String()] - require.True(t, ok) - require.Equal(t, objects[i].raw, d.raw) - require.Equal(t, objects[i].addr, d.addr) - require.Equal(t, objects[i].storageID, d.storageID) - } - }) -} - -func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []objectDesc) { - t.Run("ignore errors doesn't work for logical errors", func(t *testing.T) { - seen := make(map[string]objectDesc) - - var n int - logicErr := errors.New("logic error") - var iterPrm common.IteratePrm - iterPrm.IgnoreErrors = true - iterPrm.Handler = func(elem common.IterationElement) error { - seen[elem.Address.String()] = objectDesc{ - addr: elem.Address, - raw: elem.ObjectData, - storageID: elem.StorageID, - } - n++ - if n == len(objects)/2 { - return logicErr - } - return nil - } - - _, err := s.Iterate(context.Background(), iterPrm) - require.Equal(t, err, logicErr) - require.Len(t, seen, len(objects)/2) - for i := range objects { - d, ok := seen[objects[i].addr.String()] - if ok { - n-- - require.Equal(t, objects[i].raw, d.raw) - require.Equal(t, objects[i].addr, d.addr) - require.Equal(t, objects[i].storageID, d.storageID) - } - } - require.Equal(t, 0, n) - }) -} diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go deleted file mode 100644 index ff1aa9d64..000000000 --- a/pkg/local_object_storage/blobstor/iterate.go +++ /dev/null @@ -1,71 +0,0 @@ -package blobstor - -import ( - "context" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Iterate traverses the stored objects and calls the handler on each element. -// -// Returns any error encountered that prevented complete iteration over the storage. -// -// If the handler returns an error, the method wraps and returns it immediately.
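That contract separates two error channels: sub-storage failures, which IgnoreErrors can downgrade to logged warnings, and handler errors, which always abort (exactly what the "ignore errors doesn't work for logical errors" test above checks). A hedged consumer sketch with a hypothetical countObjects helper; the implementation follows.

```go
package blobstorutil

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
)

// countObjects counts iterated objects, tolerating unreadable entries but
// still stopping promptly if the context is canceled.
func countObjects(ctx context.Context, b *blobstor.BlobStor) (uint64, error) {
	var n uint64
	prm := common.IteratePrm{
		IgnoreErrors: true, // sub-storage read errors are logged and skipped
		Handler: func(common.IterationElement) error {
			n++
			return ctx.Err() // a non-nil handler error aborts the whole iteration
		},
	}
	_, err := b.Iterate(ctx, prm)
	return n, err
}
```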
-func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - b.metrics.Iterate(time.Since(startedAt), success) - }() - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Iterate", - trace.WithAttributes( - attribute.Bool("ignore_errors", prm.IgnoreErrors), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - for i := range b.storage { - _, err := b.storage[i].Storage.Iterate(ctx, prm) - if err != nil { - if prm.IgnoreErrors { - b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.String("storage_path", b.storage[i].Storage.Path()), - zap.String("storage_type", b.storage[i].Storage.Type()), - zap.Error(err)) - continue - } - return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err) - } - } - success = true - return common.IterateRes{}, nil -} - -// IterateBinaryObjects is a helper function which iterates over BlobStor and passes binary objects to f. -// Errors related to object reading and unmarshaling are logged and skipped. -func IterateBinaryObjects(ctx context.Context, blz *BlobStor, f func(addr oid.Address, data []byte, descriptor []byte) error) error { - var prm common.IteratePrm - - prm.Handler = func(elem common.IterationElement) error { - return f(elem.Address, elem.ObjectData, elem.StorageID) - } - prm.IgnoreErrors = true - - _, err := blz.Iterate(ctx, prm) - - return err -} diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go deleted file mode 100644 index 2786321a8..000000000 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package blobstor - -import ( - "context" - "encoding/binary" - "errors" - "os" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestIterateObjects(t *testing.T) { - p := t.Name() - - const smalSz = 50 - - // create BlobStor instance - blobStor := New( - WithStorages(defaultStorages(p, smalSz)), - WithCompression(compression.Config{ - Enabled: true, - }), - ) - - defer os.RemoveAll(p) - - // open Blobstor - require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - - // initialize Blobstor - require.NoError(t, blobStor.Init(context.Background())) - - defer blobStor.Close(context.Background()) - - const objNum = 5 - - type addrData struct { - big bool - addr oid.Address - data []byte - } - - mObjs := make(map[string]addrData) - - for i := range uint64(objNum) { - sz := smalSz - - big := i < objNum/2 - if big { - sz++ - } - - data := make([]byte, sz) - binary.BigEndian.PutUint64(data, i) - - addr := oidtest.Address() - - mObjs[string(data)] = addrData{ - big: big, - addr: addr, - data: data, - } - } - - for _, v := range mObjs { - _, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data}) - require.NoError(t, err) - } - - err := 
IterateBinaryObjects(context.Background(), blobStor, func(addr oid.Address, data []byte, descriptor []byte) error { - v, ok := mObjs[string(data)] - require.True(t, ok) - - require.Equal(t, v.data, data) - - if v.big { - require.True(t, descriptor != nil && len(descriptor) == 0) - } else { - require.NotEmpty(t, descriptor) - } - - delete(mObjs, string(data)) - - return nil - }) - require.NoError(t, err) - require.Empty(t, mObjs) -} - -func TestIterate_IgnoreErrors(t *testing.T) { - ctx := context.Background() - - myErr := errors.New("unique error") - nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil } - panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") } - errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr } - - var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error) - st1 := teststore.New( - teststore.WithSubstorage(memstore.New()), - teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { - return s1iter(prm) - })) - st2 := teststore.New( - teststore.WithSubstorage(memstore.New()), - teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { - return s2iter(prm) - })) - - bsOpts := []Option{WithStorages([]SubStorage{ - {Storage: st1}, - {Storage: st2}, - })} - bs := New(bsOpts...) - require.NoError(t, bs.Open(ctx, mode.ReadWrite)) - require.NoError(t, bs.Init(ctx)) - - nopHandler := func(e common.IterationElement) error { - return nil - } - - t.Run("no errors", func(t *testing.T) { - s1iter = nopIter - s2iter = nopIter - _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) - require.NoError(t, err) - }) - t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) { - s1iter = errIter - s2iter = panicIter - _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) - require.ErrorIs(t, err, myErr) - }) - - t.Run("ignore errors, storage 1", func(t *testing.T) { - s1iter = errIter - s2iter = nopIter - _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) - require.NoError(t, err) - }) - t.Run("ignore errors, storage 2", func(t *testing.T) { - s1iter = nopIter - s2iter = errIter - _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) - require.NoError(t, err) - }) -} diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go deleted file mode 100644 index 070b1eac9..000000000 --- a/pkg/local_object_storage/blobstor/logger.go +++ /dev/null @@ -1,23 +0,0 @@ -package blobstor - -import ( - "context" - - storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -const ( - deleteOp = "DELETE" - putOp = "PUT" -) - -func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) { - storagelog.Write(ctx, l, - storagelog.AddressField(addr), - storagelog.OpField(op), - storagelog.StorageTypeField(typ), - storagelog.StorageIDField(sID), - ) -} diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go deleted file mode 100644 index 3df96a1c3..000000000 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ /dev/null @@ -1,22 +0,0 @@ -package memstore - -import ( - "context" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -func (s *memstoreImpl) Open(mod mode.ComponentMode) error { - s.readOnly = mod.ReadOnly() - return nil -} - -func (s *memstoreImpl) Init() error { return nil } -func (s *memstoreImpl) Close(context.Context) error { return nil } -func (s *memstoreImpl) Type() string { return Type } -func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } -func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} -func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go deleted file mode 100644 index 7ef7e37a4..000000000 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package memstore implements a memory-backed common.Storage for testing purposes. -package memstore - -import ( - "context" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -const Type = "memstore" - -type memstoreImpl struct { - *cfg - mu sync.RWMutex - objs map[string][]byte -} - -func New(opts ...Option) common.Storage { - st := &memstoreImpl{ - cfg: defaultConfig(), - objs: map[string][]byte{}, - } - - for _, opt := range opts { - opt(st.cfg) - } - - return st -} - -func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, error) { - key := req.Address.EncodeToString() - - s.mu.RLock() - data, exists := s.objs[key] - s.mu.RUnlock() - - if !exists { - return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - // Decompress the data. - var err error - if data, err = s.compression.Decompress(data); err != nil { - return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) - } - - // Unmarshal the SDK object. 
- obj := objectSDK.New() - if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) - } - - return common.GetRes{Object: obj, RawData: data}, nil -} - -func (s *memstoreImpl) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) { - getResp, err := s.Get(ctx, common.GetPrm{ - Address: req.Address, - StorageID: req.StorageID, - }) - if err != nil { - return common.GetRangeRes{}, err - } - - payload := getResp.Object.Payload() - from := req.Range.GetOffset() - to := from + req.Range.GetLength() - - if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { - return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) - } - - return common.GetRangeRes{ - Data: payload[from:to], - }, nil -} - -func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.ExistsRes, error) { - key := req.Address.EncodeToString() - - s.mu.RLock() - defer s.mu.RUnlock() - - _, exists := s.objs[key] - return common.ExistsRes{Exists: exists}, nil -} - -func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) { - if s.readOnly { - return common.PutRes{}, common.ErrReadOnly - } - if !req.DontCompress { - req.RawData = s.compression.Compress(req.RawData) - } - - key := req.Address.EncodeToString() - - s.mu.Lock() - defer s.mu.Unlock() - - s.objs[key] = req.RawData - return common.PutRes{StorageID: []byte(s.rootPath)}, nil -} - -func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) { - if s.readOnly { - return common.DeleteRes{}, common.ErrReadOnly - } - - key := req.Address.EncodeToString() - - s.mu.Lock() - defer s.mu.Unlock() - - if _, exists := s.objs[key]; exists { - delete(s.objs, key) - return common.DeleteRes{}, nil - } - - return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) -} - -func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common.IterateRes, error) { - s.mu.RLock() - defer s.mu.RUnlock() - for k, v := range s.objs { - elem := common.IterationElement{ - ObjectData: v, - } - if err := elem.Address.DecodeString(k); err != nil { - if req.IgnoreErrors { - continue - } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err)) - } - var err error - if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil { - if req.IgnoreErrors { - continue - } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decompressing data for address %q: %v", s, elem.Address.String(), err)) - } - switch { - case req.Handler != nil: - if err := req.Handler(elem); err != nil { - return common.IterateRes{}, err - } - default: - if !req.IgnoreErrors { - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) no Handler or LazyHandler set for IteratePrm", s)) - } - } - } - return common.IterateRes{}, nil -} - -func (s *memstoreImpl) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) { - return common.RebuildRes{}, nil -} - -func (s *memstoreImpl) ObjectsCount(_ context.Context) (uint64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return uint64(len(s.objs)), nil -} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go deleted file mode 100644 index f904d4232..000000000 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package memstore - -import ( - "context" 
- "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestSimpleLifecycle(t *testing.T) { - s := New( - WithRootPath("memstore"), - ) - defer func() { require.NoError(t, s.Close(context.Background())) }() - require.NoError(t, s.Open(mode.ComponentReadWrite)) - require.NoError(t, s.Init()) - - obj := blobstortest.NewObject(1024) - addr := object.AddressOf(obj) - d, err := obj.Marshal() - require.NoError(t, err) - - { - _, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true}) - require.NoError(t, err) - } - - { - resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr}) - require.NoError(t, err) - require.True(t, resp.Exists) - } - - { - resp, err := s.Get(context.Background(), common.GetPrm{Address: addr}) - require.NoError(t, err) - require.Equal(t, obj.Payload(), resp.Object.Payload()) - } - - { - var objRange objectSDK.Range - objRange.SetOffset(256) - objRange.SetLength(512) - resp, err := s.GetRange(context.Background(), common.GetRangePrm{ - Address: addr, - Range: objRange, - }) - require.NoError(t, err) - require.Equal(t, obj.Payload()[objRange.GetOffset():objRange.GetOffset()+objRange.GetLength()], resp.Data) - } - - { - _, err := s.Delete(context.Background(), common.DeletePrm{Address: addr}) - require.NoError(t, err) - } - - { - resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr}) - require.NoError(t, err) - require.False(t, resp.Exists) - } -} diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go deleted file mode 100644 index 7605af4e5..000000000 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ /dev/null @@ -1,29 +0,0 @@ -package memstore - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" -) - -type cfg struct { - rootPath string - readOnly bool - compression *compression.Compressor -} - -func defaultConfig() *cfg { - return &cfg{} -} - -type Option func(*cfg) - -func WithRootPath(p string) Option { - return func(c *cfg) { - c.rootPath = p - } -} - -func WithReadOnly(ro bool) Option { - return func(c *cfg) { - c.readOnly = ro - } -} diff --git a/pkg/local_object_storage/blobstor/metrics.go b/pkg/local_object_storage/blobstor/metrics.go deleted file mode 100644 index aadc237af..000000000 --- a/pkg/local_object_storage/blobstor/metrics.go +++ /dev/null @@ -1,30 +0,0 @@ -package blobstor - -import "time" - -type Metrics interface { - SetParentID(parentID string) - SetMode(readOnly bool) - Close() - - Delete(d time.Duration, success, withStorageID bool) - Exists(d time.Duration, success, withStorageID bool) - GetRange(d time.Duration, size int, success, withStorageID bool) - Get(d time.Duration, size int, success, withStorageID bool) - Iterate(d time.Duration, success bool) - Put(d time.Duration, size int, success bool) - ObjectsCount(d time.Duration, success bool) -} - -type noopMetrics struct{} - -func (m *noopMetrics) SetParentID(string) {} -func (m *noopMetrics) SetMode(bool) {} -func (m *noopMetrics) Close() {} -func (m *noopMetrics) 
Delete(time.Duration, bool, bool) {} -func (m *noopMetrics) Exists(time.Duration, bool, bool) {} -func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {} -func (m *noopMetrics) Get(time.Duration, int, bool, bool) {} -func (m *noopMetrics) Iterate(time.Duration, bool) {} -func (m *noopMetrics) Put(time.Duration, int, bool) {} -func (m *noopMetrics) ObjectsCount(time.Duration, bool) {} diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go deleted file mode 100644 index 80268fa7a..000000000 --- a/pkg/local_object_storage/blobstor/mode.go +++ /dev/null @@ -1,36 +0,0 @@ -package blobstor - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -// SetMode sets the blobstor mode of operation. -func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { - b.modeMtx.Lock() - defer b.modeMtx.Unlock() - - if b.mode == m { - return nil - } - - if b.mode.ReadOnly() == m.ReadOnly() { - return nil - } - - err := b.Close(ctx) - if err == nil { - if err = b.openBlobStor(ctx, m); err == nil { - err = b.Init(ctx) - } - } - if err != nil { - return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) - } - - b.mode = m - b.metrics.SetMode(m.ReadOnly()) - return nil -} diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go deleted file mode 100644 index 64e3c8da1..000000000 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package blobstor - -import ( - "context" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -type storage struct { - desc string - create func(string) common.Storage -} - -func (s storage) open(b *testing.B) common.Storage { - st := s.create(b.TempDir()) - - require.NoError(b, st.Open(mode.ComponentReadWrite)) - require.NoError(b, st.Init()) - - return st -} - -// The storages to benchmark. Each storage has a description and a function which returns the actual -// storage along with a cleanup function. 
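Editor's note: the noopMetrics stub above spells out the full Metrics surface. For illustration, a hedged sketch of a minimal implementation that merely counts successful reads and writes (countingMetrics is hypothetical; the node wires in real metrics elsewhere, and "sync/atomic" and "time" are assumed imports):

type countingMetrics struct {
	puts, gets atomic.Uint64
}

func (m *countingMetrics) SetParentID(string)                      {}
func (m *countingMetrics) SetMode(bool)                            {}
func (m *countingMetrics) Close()                                  {}
func (m *countingMetrics) Delete(time.Duration, bool, bool)        {}
func (m *countingMetrics) Exists(time.Duration, bool, bool)        {}
func (m *countingMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *countingMetrics) Get(_ time.Duration, _ int, success, _ bool) {
	if success {
		m.gets.Add(1) // count only successful reads
	}
}
func (m *countingMetrics) Iterate(time.Duration, bool) {}
func (m *countingMetrics) Put(_ time.Duration, _ int, success bool) {
	if success {
		m.puts.Add(1) // count only successful writes
	}
}
func (m *countingMetrics) ObjectsCount(time.Duration, bool) {}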
-var storages = []storage{ - { - desc: "memstore", - create: func(string) common.Storage { - return memstore.New() - }, - }, - { - desc: "fstree_nosync", - create: func(dir string) common.Storage { - return fstree.New( - fstree.WithPath(dir), - fstree.WithDepth(2), - fstree.WithDirNameLen(2), - fstree.WithNoSync(true), - ) - }, - }, - { - desc: "fstree_without_object_counter", - create: func(dir string) common.Storage { - return fstree.New( - fstree.WithPath(dir), - fstree.WithDepth(2), - fstree.WithDirNameLen(2), - ) - }, - }, - { - desc: "fstree_with_object_counter", - create: func(dir string) common.Storage { - return fstree.New( - fstree.WithPath(dir), - fstree.WithDepth(2), - fstree.WithDirNameLen(2), - fstree.WithFileCounter(fstree.NewSimpleCounter()), - ) - }, - }, - { - desc: "blobovniczatree", - create: func(dir string) common.Storage { - return blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(dir), - ) - }, - }, -} - -func BenchmarkSubstorageReadPerf(b *testing.B) { - readTests := []struct { - desc string - size int - objGen func() testutil.ObjectGenerator - addrGen func() testutil.AddressGenerator - }{ - { - desc: "seq100", - size: 10000, - objGen: func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} }, - addrGen: func() testutil.AddressGenerator { return &testutil.SeqAddrGenerator{MaxID: 100} }, - }, - { - desc: "rand100", - size: 10000, - objGen: func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} }, - addrGen: func() testutil.AddressGenerator { return testutil.RandAddrGenerator(10000) }, - }, - } - for _, tt := range readTests { - for _, stEntry := range storages { - b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { - objGen := tt.objGen() - st := stEntry.open(b) - defer func() { require.NoError(b, st.Close(context.Background())) }() - - // Fill database - var errG errgroup.Group - for range tt.size { - obj := objGen.Next() - addr := testutil.AddressFromObject(b, obj) - errG.Go(func() error { - raw, err := obj.Marshal() - if err != nil { - return fmt.Errorf("marshal: %v", err) - } - _, err = st.Put(context.Background(), common.PutPrm{ - Address: addr, - RawData: raw, - }) - return err - }) - } - require.NoError(b, errG.Wait()) - - // Benchmark reading - addrGen := tt.addrGen() - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := st.Get(context.Background(), common.GetPrm{Address: addrGen.Next()}) - require.NoError(b, err) - } - }) - }) - } - } -} - -func BenchmarkSubstorageWritePerf(b *testing.B) { - generators := []struct { - desc string - create func() testutil.ObjectGenerator - }{ - {desc: "rand10", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 10} }}, - {desc: "rand100", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} }}, - {desc: "rand1000", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 1000} }}, - {desc: "overwrite10", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 10, MaxObjects: 100} }}, - {desc: "overwrite100", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 100, MaxObjects: 100} }}, - {desc: "overwrite1000", create: func() testutil.ObjectGenerator { - return &testutil.OverwriteObjGenerator{ObjSize: 1000, MaxObjects: 100} - }}, - } - - for _, genEntry := range generators { - for _, stEntry := range storages { - 
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
-				gen := genEntry.create()
-				st := stEntry.open(b)
-				defer func() { require.NoError(b, st.Close(context.Background())) }()
-
-				b.ResetTimer()
-				b.RunParallel(func(pb *testing.PB) {
-					for pb.Next() {
-						obj := gen.Next()
-						addr := testutil.AddressFromObject(b, obj)
-						raw, err := obj.Marshal()
-						require.NoError(b, err)
-						if _, err := st.Put(context.Background(), common.PutPrm{
-							Address: addr,
-							RawData: raw,
-						}); err != nil {
-							b.Fatalf("writing entry: %v", err)
-						}
-					}
-				})
-			})
-		}
-	}
-}
-
-func BenchmarkSubstorageIteratePerf(b *testing.B) {
-	iterateTests := []struct {
-		desc   string
-		size   int
-		objGen func() testutil.ObjectGenerator
-	}{
-		{
-			desc:   "rand100",
-			size:   10000,
-			objGen: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} },
-		},
-	}
-	for _, tt := range iterateTests {
-		for _, stEntry := range storages {
-			b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
-				objGen := tt.objGen()
-				st := stEntry.open(b)
-				defer func() { require.NoError(b, st.Close(context.Background())) }()
-
-				// Fill database
-				for range tt.size {
-					obj := objGen.Next()
-					addr := testutil.AddressFromObject(b, obj)
-					raw, err := obj.Marshal()
-					require.NoError(b, err)
-					if _, err := st.Put(context.Background(), common.PutPrm{
-						Address: addr,
-						RawData: raw,
-					}); err != nil {
-						b.Fatalf("writing entry: %v", err)
-					}
-				}
-
-				// Benchmark iterate
-				cnt := 0
-				b.ResetTimer()
-				_, err := st.Iterate(context.Background(), common.IteratePrm{
-					Handler: func(elem common.IterationElement) error {
-						cnt++
-						return nil
-					},
-				})
-				require.NoError(b, err)
-				require.Equal(b, tt.size, cnt)
-			})
-		}
-	}
-}
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
deleted file mode 100644
index fe9c109dd..000000000
--- a/pkg/local_object_storage/blobstor/put.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package blobstor
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// ErrNoPlaceFound is returned when an object can't be saved to any sub-storage
-// component because of the policy.
-var ErrNoPlaceFound = logicerr.New("couldn't find a place to store an object")
-
-// Put saves the object in BLOB storage.
-//
-// If the object is "big", BlobStor saves it in a shallow directory tree.
-// Otherwise, BlobStor saves it in a blobovnicza; in that case the identifier
-// of the blobovnicza is returned.
-//
-// Returns any error encountered that
-// prevented the object from being completely saved.
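Editor's note: the Put implementation that follows routes each object to the first substorage whose Policy accepts it. A hedged configuration sketch (buildTwoTierBlobStor, smallStore, bigStore and smallLimit are illustrative placeholders; the Policy signature is inferred from the call b.storage[i].Policy(prm.Object, prm.RawData) below, and objectSDK is an assumed import):

func buildTwoTierBlobStor(smallStore, bigStore common.Storage, smallLimit int) *BlobStor {
	return New(
		WithStorages([]SubStorage{
			{
				Storage: smallStore,
				// Accept only objects up to smallLimit raw bytes.
				Policy: func(_ *objectSDK.Object, data []byte) bool {
					return len(data) <= smallLimit
				},
			},
			{Storage: bigStore}, // nil Policy: accepts anything
		}),
	)
}

Substorages are consulted in order, so the catch-all entry with a nil Policy must come last; if no Policy accepts the object, Put returns ErrNoPlaceFound.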
-func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { - var ( - startedAt = time.Now() - success = false - size = 0 - ) - defer func() { - b.metrics.Put(time.Since(startedAt), size, success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put", - trace.WithAttributes( - attribute.String("address", prm.Address.EncodeToString()), - attribute.Bool("dont_compress", prm.DontCompress), - )) - defer span.End() - - b.modeMtx.RLock() - defer b.modeMtx.RUnlock() - - if prm.Object != nil { - prm.Address = object.AddressOf(prm.Object) - } - if prm.RawData == nil { - // marshal object - data, err := prm.Object.Marshal() - if err != nil { - return common.PutRes{}, fmt.Errorf("marshal the object: %w", err) - } - prm.RawData = data - } - size = len(prm.RawData) - - for i := range b.storage { - if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) { - res, err := b.storage[i].Storage.Put(ctx, prm) - if err == nil { - success = true - logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) - } - return res, err - } - } - - return common.PutRes{}, ErrNoPlaceFound -} diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go deleted file mode 100644 index f28816555..000000000 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ /dev/null @@ -1,41 +0,0 @@ -package blobstor - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type StorageIDUpdate interface { - UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error -} - -func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error { - var summary common.RebuildRes - var rErr error - for _, storage := range b.storage { - res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{ - MetaStorage: upd, - Limiter: concLimiter, - FillPercent: fillPercent, - }) - summary.FilesRemoved += res.FilesRemoved - summary.ObjectsMoved += res.ObjectsMoved - if err != nil { - b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages, - zap.String("failed_storage_path", storage.Storage.Path()), - zap.String("failed_storage_type", storage.Storage.Type()), - zap.Error(err)) - rErr = err - break - } - } - b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted, - zap.Bool("success", rErr == nil), - zap.Uint64("total_files_removed", summary.FilesRemoved), - zap.Uint64("total_objects_moved", summary.ObjectsMoved)) - return rErr -} diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go deleted file mode 100644 index 3a38ecf82..000000000 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ /dev/null @@ -1,82 +0,0 @@ -package teststore - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -type cfg struct { - st common.Storage - overrides struct { - Open func(mode mode.ComponentMode) error - Init func() error - Close func() error - - Type func() string - Path func() string - SetCompressor func(cc *compression.Compressor) - 
Compressor func() *compression.Compressor
-		SetReportErrorFunc func(f func(context.Context, string, error))
-
-		Get      func(common.GetPrm) (common.GetRes, error)
-		GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
-		Exists   func(common.ExistsPrm) (common.ExistsRes, error)
-		Put      func(common.PutPrm) (common.PutRes, error)
-		Delete   func(common.DeletePrm) (common.DeleteRes, error)
-		Iterate  func(common.IteratePrm) (common.IterateRes, error)
-	}
-}
-
-type Option func(*cfg)
-
-func WithSubstorage(st common.Storage) Option {
-	return func(c *cfg) {
-		c.st = st
-	}
-}
-
-func WithOpen(f func(mode.ComponentMode) error) Option { return func(c *cfg) { c.overrides.Open = f } }
-func WithInit(f func() error) Option                   { return func(c *cfg) { c.overrides.Init = f } }
-func WithClose(f func() error) Option                  { return func(c *cfg) { c.overrides.Close = f } }
-
-func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
-func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
-
-func WithSetCompressor(f func(*compression.Compressor)) Option {
-	return func(c *cfg) { c.overrides.SetCompressor = f }
-}
-
-func WithCompressor(f func() *compression.Compressor) Option {
-	return func(c *cfg) { c.overrides.Compressor = f }
-}
-
-func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
-	return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
-}
-
-func WithGet(f func(common.GetPrm) (common.GetRes, error)) Option {
-	return func(c *cfg) { c.overrides.Get = f }
-}
-
-func WithGetRange(f func(common.GetRangePrm) (common.GetRangeRes, error)) Option {
-	return func(c *cfg) { c.overrides.GetRange = f }
-}
-
-func WithExists(f func(common.ExistsPrm) (common.ExistsRes, error)) Option {
-	return func(c *cfg) { c.overrides.Exists = f }
-}
-
-func WithPut(f func(common.PutPrm) (common.PutRes, error)) Option {
-	return func(c *cfg) { c.overrides.Put = f }
-}
-
-func WithDelete(f func(common.DeletePrm) (common.DeleteRes, error)) Option {
-	return func(c *cfg) { c.overrides.Delete = f }
-}
-
-func WithIterate(f func(common.IteratePrm) (common.IterateRes, error)) Option {
-	return func(c *cfg) { c.overrides.Iterate = f }
-}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
deleted file mode 100644
index 190b6a876..000000000
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Package teststore provides a common.Storage implementation for testing/mocking purposes.
-//
-// A new teststore.TestStore can be obtained with teststore.New. Whenever one of the common.Storage
-// methods is called, the implementation selects what function to call in the following order:
-// 1. If an override for that method was provided at construction time (via teststore.WithXXX()) or
-// afterwards via SetOption, that override is used.
-// 2. If a substorage was provided at construction time (via teststore.WithSubstorage()) or afterwards
-// via SetOption, the corresponding method in the substorage is used.
-// 3. If none of the above apply, the call panics with an error describing the unexpected call.
-//
-// It's safe to call SetOption and the overrides from multiple goroutines, but it's the override's
-// responsibility to ensure safety of whatever operation it executes.
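Editor's note: given the dispatch order documented above, the typical failure-injection flow is to set an override, exercise the code under test, then clear the override so calls fall through to the substorage again. A hedged sketch, mirroring how the engine tests later in this diff use SetOption with WithOpen(nil):

// Inject a Get failure, then restore delegation to the backing memstore.
st := teststore.New(teststore.WithSubstorage(memstore.New()))
st.SetOption(teststore.WithGet(func(common.GetPrm) (common.GetRes, error) {
	return common.GetRes{}, teststore.ErrDiskExploded
}))
// ... exercise the code under test against st ...
st.SetOption(teststore.WithGet(nil)) // Get now falls through to the memstore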
-package teststore - -import ( - "context" - "errors" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -// TestStore is a common.Storage implementation for testing/mocking purposes. -type TestStore struct { - mu sync.RWMutex - *cfg -} - -// ErrDiskExploded is a phony error which can be used for testing purposes to differentiate it from -// more common errors. -var ErrDiskExploded = errors.New("disk exploded") - -// New returns a teststore.TestStore from the given options. -func New(opts ...Option) *TestStore { - c := &cfg{} - for _, opt := range opts { - opt(c) - } - return &TestStore{cfg: c} -} - -// SetOption overrides an option of an existing teststore.TestStore. -// This is useful for overriding methods during a test so that different -// behaviors are simulated. -func (s *TestStore) SetOption(opt Option) { - s.mu.Lock() - defer s.mu.Unlock() - opt(s.cfg) -} - -func (s *TestStore) Open(mod mode.ComponentMode) error { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Open != nil: - return s.overrides.Open(mod) - case s.st != nil: - return s.st.Open(mod) - default: - panic(fmt.Sprintf("unexpected storage call: Open(%v)", mod.String())) - } -} - -func (s *TestStore) Init() error { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Init != nil: - return s.overrides.Init() - case s.st != nil: - return s.st.Init() - default: - panic("unexpected storage call: Init()") - } -} - -func (s *TestStore) Close(ctx context.Context) error { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Close != nil: - return s.overrides.Close() - case s.st != nil: - return s.st.Close(ctx) - default: - panic("unexpected storage call: Close()") - } -} - -func (s *TestStore) Type() string { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Type != nil: - return s.overrides.Type() - case s.st != nil: - return s.st.Type() - default: - panic("unexpected storage call: Type()") - } -} - -func (s *TestStore) Path() string { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Path != nil: - return s.overrides.Path() - case s.st != nil: - return s.st.Path() - default: - panic("unexpected storage call: Path()") - } -} - -func (s *TestStore) SetCompressor(cc *compression.Compressor) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.SetCompressor != nil: - s.overrides.SetCompressor(cc) - case s.st != nil: - s.st.SetCompressor(cc) - default: - panic(fmt.Sprintf("unexpected storage call: SetCompressor(%+v)", cc)) - } -} - -func (s *TestStore) Compressor() *compression.Compressor { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Compressor != nil: - return s.overrides.Compressor() - case s.st != nil: - return s.st.Compressor() - default: - panic("unexpected storage call: Compressor()") - } -} - -func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.SetReportErrorFunc != nil: - s.overrides.SetReportErrorFunc(f) - case s.st != nil: - s.st.SetReportErrorFunc(f) - default: - panic("unexpected storage call: SetReportErrorFunc()") - } -} - -func (s *TestStore) Get(ctx context.Context, req common.GetPrm) (common.GetRes, error) { - switch { - case s.overrides.Get != nil: - return 
s.overrides.Get(req) - case s.st != nil: - return s.st.Get(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: Get(%+v)", req)) - } -} - -func (s *TestStore) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.GetRange != nil: - return s.overrides.GetRange(req) - case s.st != nil: - return s.st.GetRange(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: GetRange(%+v)", req)) - } -} - -func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.ExistsRes, error) { - switch { - case s.overrides.Exists != nil: - return s.overrides.Exists(req) - case s.st != nil: - return s.st.Exists(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: Exists(%+v)", req)) - } -} - -func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Put != nil: - return s.overrides.Put(req) - case s.st != nil: - return s.st.Put(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req)) - } -} - -func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Delete != nil: - return s.overrides.Delete(req) - case s.st != nil: - return s.st.Delete(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req)) - } -} - -func (s *TestStore) Iterate(ctx context.Context, req common.IteratePrm) (common.IterateRes, error) { - s.mu.RLock() - defer s.mu.RUnlock() - switch { - case s.overrides.Iterate != nil: - return s.overrides.Iterate(req) - case s.st != nil: - return s.st.Iterate(ctx, req) - default: - panic(fmt.Sprintf("unexpected storage call: Iterate(%+v)", req)) - } -} - -func (s *TestStore) SetParentID(string) {} - -func (s *TestStore) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) { - return common.RebuildRes{}, nil -} - -func (s *TestStore) ObjectsCount(ctx context.Context) (uint64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.st.ObjectsCount(ctx) -} diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go deleted file mode 100644 index e0617a832..000000000 --- a/pkg/local_object_storage/engine/container.go +++ /dev/null @@ -1,151 +0,0 @@ -package engine - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" -) - -// ContainerSizePrm groups parameters of ContainerSize operation. -type ContainerSizePrm struct { - cnr cid.ID -} - -// ContainerSizeRes resulting values of ContainerSize operation. -type ContainerSizeRes struct { - size uint64 -} - -// ListContainersPrm groups parameters of ListContainers operation. -type ListContainersPrm struct{} - -// ListContainersRes groups the resulting values of ListContainers operation. -type ListContainersRes struct { - containers []cid.ID -} - -// SetContainerID sets the identifier of the container to estimate the size. -func (p *ContainerSizePrm) SetContainerID(cnr cid.ID) { - p.cnr = cnr -} - -// Size returns calculated estimation of the container size. -func (r ContainerSizeRes) Size() uint64 { - return r.size -} - -// Containers returns a list of identifiers of the containers in which local objects are stored. 
-func (r ListContainersRes) Containers() []cid.ID {
-	return r.containers
-}
-
-// ContainerSize returns the sum of estimated container sizes across all shards.
-//
-// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
-	defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
-
-	err = e.execIfNotBlocked(func() error {
-		var csErr error
-		res, csErr = e.containerSize(ctx, prm)
-		return csErr
-	})
-
-	return
-}
-
-// ContainerSize calls the ContainerSize method on the engine to calculate the sum of estimated container sizes across all shards.
-func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) {
-	var prm ContainerSizePrm
-
-	prm.SetContainerID(id)
-
-	res, err := e.ContainerSize(ctx, prm)
-	if err != nil {
-		return 0, err
-	}
-
-	return res.Size(), nil
-}
-
-func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
-	var res ContainerSizeRes
-	err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
-		var csPrm shard.ContainerSizePrm
-		csPrm.SetContainerID(prm.cnr)
-
-		csRes, err := sh.ContainerSize(ctx, csPrm)
-		if err != nil {
-			e.reportShardError(ctx, sh, "can't get container size", err,
-				zap.Stringer("container_id", prm.cnr))
-			return false
-		}
-
-		res.size += csRes.Size()
-
-		return false
-	})
-
-	return res, err
-}
-
-// ListContainers returns the unique container IDs present in the engine's objects.
-//
-// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) {
-	defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
-
-	err = e.execIfNotBlocked(func() error {
-		var lcErr error
-		res, lcErr = e.listContainers(ctx)
-		return lcErr
-	})
-
-	return
-}
-
-// ListContainers calls the ListContainers method on the engine to get the unique container IDs present in the engine's objects.
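Editor's note: taken together, the package-level helpers (ContainerSize above, ListContainers just below) support a simple inventory pass over the node. A hedged sketch (logContainerSizes is illustrative and not part of the deleted code; fmt is an assumed import):

func logContainerSizes(ctx context.Context, e *StorageEngine) error {
	ids, err := ListContainers(ctx, e)
	if err != nil {
		return err
	}
	for _, id := range ids {
		size, err := ContainerSize(ctx, e, id)
		if err != nil {
			return err
		}
		// Sizes are shard-local estimates summed across shards.
		fmt.Printf("%s: ~%d bytes (estimate)\n", id.EncodeToString(), size)
	}
	return nil
}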
-func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { - var prm ListContainersPrm - - res, err := e.ListContainers(ctx, prm) - if err != nil { - return nil, err - } - - return res.Containers(), nil -} - -func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { - uniqueIDs := make(map[string]cid.ID) - - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) - if err != nil { - e.reportShardError(ctx, sh, "can't get list of containers", err) - return false - } - - for _, cnr := range res.Containers() { - id := cnr.EncodeToString() - if _, ok := uniqueIDs[id]; !ok { - uniqueIDs[id] = cnr - } - } - - return false - }); err != nil { - return ListContainersRes{}, err - } - - result := make([]cid.ID, 0, len(uniqueIDs)) - for _, v := range uniqueIDs { - result = append(result, v) - } - - return ListContainersRes{ - containers: result, - }, nil -} diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go deleted file mode 100644 index 39e532b6b..000000000 --- a/pkg/local_object_storage/engine/control.go +++ /dev/null @@ -1,303 +0,0 @@ -package engine - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -type shardInitError struct { - err error - id string -} - -// Open opens all StorageEngine's components. -func (e *StorageEngine) Open(ctx context.Context) error { - e.mtx.Lock() - defer e.mtx.Unlock() - - var wg sync.WaitGroup - errCh := make(chan shardInitError, len(e.shards)) - - for id, sh := range e.shards { - wg.Add(1) - go func(id string, sh *shard.Shard) { - defer wg.Done() - if err := sh.Open(ctx); err != nil { - errCh <- shardInitError{ - err: err, - id: id, - } - } - }(id, sh.Shard) - } - wg.Wait() - close(errCh) - - for res := range errCh { - if res.err != nil { - e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping, - zap.String("id", res.id), - zap.Error(res.err)) - - sh := e.shards[res.id] - delete(e.shards, res.id) - - err := sh.Close(ctx) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, - zap.String("id", res.id), - zap.Error(res.err)) - } - - continue - } - } - - return nil -} - -// Init initializes all StorageEngine's components. 
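Editor's note: Open and Init are deliberately separate phases: Open above merely opens each shard (closing and dropping shards whose open fails), while Init below performs the heavier per-shard initialization. A hedged sketch of the expected lifecycle, mirroring the tests later in this diff (error handling reduced to bare returns):

e := New() // engine constructor used throughout this package's tests
if err := e.Open(ctx); err != nil {
	return err
}
if err := e.Init(ctx); err != nil {
	return err
}
defer func() { _ = e.Close(ctx) }()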
-func (e *StorageEngine) Init(ctx context.Context) error { - e.mtx.Lock() - defer e.mtx.Unlock() - - errCh := make(chan shardInitError, len(e.shards)) - var eg errgroup.Group - if e.lowMem && e.anyShardRequiresRefill() { - eg.SetLimit(1) - } - - for id, sh := range e.shards { - eg.Go(func() error { - if err := sh.Init(ctx); err != nil { - errCh <- shardInitError{ - err: err, - id: id, - } - } - return nil - }) - } - err := eg.Wait() - close(errCh) - if err != nil { - return fmt.Errorf("initialize shards: %w", err) - } - - for res := range errCh { - if res.err != nil { - if errors.Is(res.err, blobstor.ErrInitBlobovniczas) { - e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping, - zap.String("id", res.id), - zap.Error(res.err)) - - sh := e.shards[res.id] - delete(e.shards, res.id) - - err := sh.Close(ctx) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, - zap.String("id", res.id), - zap.Error(res.err)) - } - - continue - } - return fmt.Errorf("initialize shard %s: %w", res.id, res.err) - } - } - - if len(e.shards) == 0 { - return errors.New("failed initialization on all shards") - } - - e.wg.Add(1) - go e.setModeLoop(ctx) - - return nil -} - -func (e *StorageEngine) anyShardRequiresRefill() bool { - for _, sh := range e.shards { - if sh.NeedRefillMetabase() { - return true - } - } - return false -} - -var errClosed = errors.New("storage engine is closed") - -// Close releases all StorageEngine's components. Waits for all data-related operations to complete. -// After the call, all the next ones will fail. -// -// The method MUST only be called when the application exits. -func (e *StorageEngine) Close(ctx context.Context) error { - close(e.closeCh) - defer e.wg.Wait() - return e.closeEngine(ctx) -} - -// closes all shards. Never returns an error, shard errors are logged. -func (e *StorageEngine) closeAllShards(ctx context.Context) error { - e.mtx.RLock() - defer e.mtx.RUnlock() - - for id, sh := range e.shards { - if err := sh.Close(ctx); err != nil { - e.log.Debug(ctx, logs.EngineCouldNotCloseShard, - zap.String("id", id), - zap.Error(err), - ) - } - } - - return nil -} - -// executes op if execution is not blocked, otherwise returns blocking error. -// -// Can be called concurrently with setBlockExecErr. -func (e *StorageEngine) execIfNotBlocked(op func() error) error { - e.blockExec.mtx.RLock() - defer e.blockExec.mtx.RUnlock() - - if e.blockExec.closed { - return errClosed - } - - return op() -} - -func (e *StorageEngine) closeEngine(ctx context.Context) error { - e.blockExec.mtx.Lock() - defer e.blockExec.mtx.Unlock() - - if e.blockExec.closed { - return errClosed - } - - e.blockExec.closed = true - return e.closeAllShards(ctx) -} - -type ReConfiguration struct { - shards map[string][]shard.Option // meta path -> shard opts -} - -// AddShard adds a shard for the reconfiguration. -// Shard identifier is calculated from paths used in blobstor. -func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) { - if rCfg.shards == nil { - rCfg.shards = make(map[string][]shard.Option) - } - - if _, found := rCfg.shards[id]; found { - return - } - - rCfg.shards[id] = opts -} - -// Reload reloads StorageEngine's configuration in runtime. 
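Editor's note: before the implementation, a hedged sketch of how a caller typically drives Reload on SIGHUP. Here shardConfig, parsedShardConfigs, shardKeyFromPaths and newShardOpts are assumed helpers standing in for the node's real config parsing; the key must be derived the same way as calculateShardID below (the concatenation of cleaned blobstor substorage paths):

func reloadFromConfig(ctx context.Context, e *StorageEngine, parsedShardConfigs []shardConfig) error {
	var rcfg ReConfiguration
	for _, cfg := range parsedShardConfigs {
		rcfg.AddShard(shardKeyFromPaths(cfg.BlobstorPaths), newShardOpts(cfg))
	}
	// Failed reloads of existing shards are logged and skipped; only
	// adding a brand-new shard can fail the whole call.
	return e.Reload(ctx, rcfg)
}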
-func (e *StorageEngine) Reload(ctx context.Context, rcfg ReConfiguration) error { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagCritical.String()) - type reloadInfo struct { - sh *shard.Shard - opts []shard.Option - } - - e.mtx.RLock() - - var shardsToRemove []string // shards IDs - var shardsToAdd []string // shard config identifiers (blobstor paths concatenation) - var shardsToReload []reloadInfo - - // mark removed shards for removal - for id, sh := range e.shards { - _, ok := rcfg.shards[calculateShardID(sh.DumpInfo())] - if !ok { - shardsToRemove = append(shardsToRemove, id) - } - } - -loop: - for newID := range rcfg.shards { - for _, sh := range e.shards { - // This calculation should be kept in sync with node - // configuration parsing during SIGHUP. - if newID == calculateShardID(sh.DumpInfo()) { - shardsToReload = append(shardsToReload, reloadInfo{ - sh: sh.Shard, - opts: rcfg.shards[newID], - }) - continue loop - } - } - - shardsToAdd = append(shardsToAdd, newID) - } - - e.mtx.RUnlock() - - e.removeShards(ctx, shardsToRemove...) - - for _, p := range shardsToReload { - err := p.sh.Reload(ctx, p.opts...) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotReloadAShard, - zap.Stringer("shard id", p.sh.ID()), - zap.Error(err)) - } - } - - for _, newID := range shardsToAdd { - sh, err := e.createShard(ctx, rcfg.shards[newID]) - if err != nil { - return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err) - } - - idStr := sh.ID().String() - - err = sh.Open(ctx) - if err == nil { - err = sh.Init(ctx) - } - if err != nil { - _ = sh.Close(ctx) - return fmt.Errorf("init %s shard: %w", idStr, err) - } - - err = e.addShard(sh) - if err != nil { - _ = sh.Close(ctx) - return fmt.Errorf("add %s shard: %w", idStr, err) - } - - e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr)) - } - - return nil -} - -func calculateShardID(info shard.Info) string { - // This calculation should be kept in sync with node - // configuration parsing during SIGHUP. - var sb strings.Builder - for _, sub := range info.BlobStorInfo.SubStorages { - sb.WriteString(filepath.Clean(sub.Path)) - } - return sb.String() -} diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go deleted file mode 100644 index 4ff0ed5ec..000000000 --- a/pkg/local_object_storage/engine/control_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "io/fs" - "os" - "path/filepath" - "strconv" - "sync/atomic" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -// TestInitializationFailure checks that shard is initialized and closed even if media -// under any single component is absent. 
-func TestInitializationFailure(t *testing.T) { - type openFileFunc func(string, int, fs.FileMode) (*os.File, error) - - type testShardOpts struct { - openFileMetabase openFileFunc - openFilePilorama openFileFunc - } - - testShard := func(opts testShardOpts) ([]shard.Option, *teststore.TestStore, *teststore.TestStore) { - sid, err := generateShardID() - require.NoError(t, err) - - storages, smallFileStorage, largeFileStorage := newTestStorages(t.TempDir(), 1<<20) - - wcOpts := []writecache.Option{ - writecache.WithPath(t.TempDir()), - } - - return []shard.Option{ - shard.WithID(sid), - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages(storages)), - shard.WithMetaBaseOptions( - meta.WithBoltDBOptions(&bbolt.Options{ - Timeout: 100 * time.Millisecond, - OpenFile: opts.openFileMetabase, - }), - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{})), - shard.WithWriteCache(true), - shard.WithWriteCacheOptions(wcOpts), - shard.WithPiloramaOptions( - pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")), - pilorama.WithOpenFile(opts.openFilePilorama), - ), - }, smallFileStorage, largeFileStorage - } - - t.Run("blobstor", func(t *testing.T) { - shardOpts, _, largeFileStorage := testShard(testShardOpts{ - openFileMetabase: os.OpenFile, - openFilePilorama: os.OpenFile, - }) - largeFileStorage.SetOption(teststore.WithOpen(func(primitiveMode mode.ComponentMode) error { - return teststore.ErrDiskExploded - })) - beforeReload := func() { - largeFileStorage.SetOption(teststore.WithOpen(nil)) - } - testEngineFailInitAndReload(t, false, shardOpts, beforeReload) - }) - t.Run("metabase", func(t *testing.T) { - var openFileMetabaseSucceed atomic.Bool - openFileMetabase := func(p string, f int, mode fs.FileMode) (*os.File, error) { - if openFileMetabaseSucceed.Load() { - return os.OpenFile(p, f, mode) - } - return nil, teststore.ErrDiskExploded - } - beforeReload := func() { - openFileMetabaseSucceed.Store(true) - } - shardOpts, _, _ := testShard(testShardOpts{ - openFileMetabase: openFileMetabase, - openFilePilorama: os.OpenFile, - }) - testEngineFailInitAndReload(t, true, shardOpts, beforeReload) - }) - t.Run("pilorama", func(t *testing.T) { - var openFilePiloramaSucceed atomic.Bool - openFilePilorama := func(p string, f int, mode fs.FileMode) (*os.File, error) { - if openFilePiloramaSucceed.Load() { - return os.OpenFile(p, f, mode) - } - return nil, teststore.ErrDiskExploded - } - beforeReload := func() { - openFilePiloramaSucceed.Store(true) - } - shardOpts, _, _ := testShard(testShardOpts{ - openFileMetabase: os.OpenFile, - openFilePilorama: openFilePilorama, - }) - testEngineFailInitAndReload(t, false, shardOpts, beforeReload) - }) -} - -func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.Option, beforeReload func()) { - var configID string - - e := New() - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - _, err := e.AddShard(context.Background(), opts...) 
- require.NoError(t, err) - - e.mtx.RLock() - var id string - for id = range e.shards { - break - } - configID = calculateShardID(e.shards[id].Shard.DumpInfo()) - e.mtx.RUnlock() - - err = e.Open(context.Background()) - require.NoError(t, err) - if degradedMode { - require.NoError(t, e.Init(context.Background())) - require.Equal(t, mode.DegradedReadOnly, e.DumpInfo().Shards[0].Mode) - return - } else { - require.Error(t, e.Init(context.Background())) - - e.mtx.RLock() - shardCount := len(e.shards) - e.mtx.RUnlock() - require.Equal(t, 0, shardCount) - } - - beforeReload() - - require.NoError(t, e.Reload(context.Background(), ReConfiguration{ - shards: map[string][]shard.Option{configID: opts}, - })) - - e.mtx.RLock() - shardCount := len(e.shards) - e.mtx.RUnlock() - require.Equal(t, 1, shardCount) -} - -func TestPersistentShardID(t *testing.T) { - dir := t.TempDir() - - te := newEngineWithErrorThreshold(t, dir, 1) - - checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite) - require.NoError(t, te.ng.Close(context.Background())) - - newTe := newEngineWithErrorThreshold(t, dir, 1) - for i := range len(newTe.shards) { - require.Equal(t, te.shards[i].id, newTe.shards[i].id) - } - require.NoError(t, newTe.ng.Close(context.Background())) - - p1 := newTe.ng.shards[te.shards[0].id.String()].Shard.DumpInfo().MetaBaseInfo.Path - p2 := newTe.ng.shards[te.shards[1].id.String()].Shard.DumpInfo().MetaBaseInfo.Path - tmp := filepath.Join(dir, "tmp") - require.NoError(t, os.Rename(p1, tmp)) - require.NoError(t, os.Rename(p2, p1)) - require.NoError(t, os.Rename(tmp, p2)) - - newTe = newEngineWithErrorThreshold(t, dir, 1) - require.Equal(t, te.shards[1].id, newTe.shards[0].id) - require.Equal(t, te.shards[0].id, newTe.shards[1].id) - require.NoError(t, newTe.ng.Close(context.Background())) -} - -func TestReload(t *testing.T) { - path := t.TempDir() - - t.Run("add shards", func(t *testing.T) { - const shardNum = 4 - addPath := filepath.Join(path, "add") - - e, currShards := engineWithShards(t, addPath, shardNum) - - var rcfg ReConfiguration - for _, p := range currShards { - rcfg.AddShard(p, nil) - } - - rcfg.AddShard(currShards[0], nil) // same path - require.NoError(t, e.Reload(context.Background(), rcfg)) - - // no new paths => no new shards - require.Equal(t, shardNum, len(e.shards)) - - newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum)) - - // add new shard - rcfg.AddShard(newMeta, []shard.Option{shard.WithMetaBaseOptions( - meta.WithPath(newMeta), - meta.WithEpochState(epochState{}), - )}) - require.NoError(t, e.Reload(context.Background(), rcfg)) - - require.Equal(t, shardNum+1, len(e.shards)) - - require.NoError(t, e.Close(context.Background())) - }) - - t.Run("remove shards", func(t *testing.T) { - const shardNum = 4 - removePath := filepath.Join(path, "remove") - - e, currShards := engineWithShards(t, removePath, shardNum) - - var rcfg ReConfiguration - for i := range len(currShards) - 1 { // without one of the shards - rcfg.AddShard(currShards[i], nil) - } - - require.NoError(t, e.Reload(context.Background(), rcfg)) - - // removed one - require.Equal(t, shardNum-1, len(e.shards)) - - require.NoError(t, e.Close(context.Background())) - }) -} - -// engineWithShards creates engine with specified number of shards. Returns -// slice of paths to their metabase and the engine. -func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []string) { - addPath := filepath.Join(path, "add") - - currShards := make([]string, 0, num) - - te := testNewEngine(t). 
- setShardsNumOpts(t, num, func(id int) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages(newStorages(t, filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - ), - } - }). - prepare(t) - e, ids := te.engine, te.shardIDs - - for _, id := range ids { - currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo())) - } - - require.Equal(t, num, len(e.shards)) - - return e, currShards -} diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go deleted file mode 100644 index 223cdbc48..000000000 --- a/pkg/local_object_storage/engine/delete.go +++ /dev/null @@ -1,196 +0,0 @@ -package engine - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// DeletePrm groups the parameters of Delete operation. -type DeletePrm struct { - addr oid.Address - - forceRemoval bool -} - -// WithAddress is a Delete option to set the addresses of the objects to delete. -// -// Option is required. -func (p *DeletePrm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// WithForceRemoval is a Delete option to remove an object despite any -// restrictions imposed on deleting that object. Expected to be used -// only in control service. -func (p *DeletePrm) WithForceRemoval() { - p.forceRemoval = true -} - -// Delete marks the objects to be removed. -// -// Returns an error if executions are blocked (see BlockExecution). -// -// Returns apistatus.ObjectLocked if at least one object is locked. -// In this case no object from the list is marked to be deleted. -// -// NOTE: Marks any object to be deleted (despite any prohibitions -// on operations with that object) if WithForceRemoval option has -// been provided. -func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.Bool("force_removal", prm.forceRemoval), - )) - defer span.End() - defer elapsed("Delete", e.metrics.AddMethodDuration)() - - return e.execIfNotBlocked(func() error { - return e.delete(ctx, prm) - }) -} - -func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { - var locked struct { - is bool - } - var splitInfo *objectSDK.SplitInfo - var ecInfo *objectSDK.ECInfo - - // Removal of a big object is done in multiple stages: - // 1. Remove the parent object. If it is locked or already removed, return immediately. - // 2. Otherwise, search for all objects with a particular SplitID and delete them too. 
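-	// (For erasure-coded objects the shard reports an ECInfoError instead;
-	// its chunks are then inhumed one by one via deleteChunks below.)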
-	if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
-		var existsPrm shard.ExistsPrm
-		existsPrm.Address = prm.addr
-
-		resExists, err := sh.Exists(ctx, existsPrm)
-		if err != nil {
-			if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
-				return true
-			}
-
-			var splitErr *objectSDK.SplitInfoError
-			var ecErr *objectSDK.ECInfoError
-			if errors.As(err, &splitErr) {
-				splitInfo = splitErr.SplitInfo()
-			} else if errors.As(err, &ecErr) {
-				ecInfo = ecErr.ECInfo()
-				e.deleteChunks(ctx, sh, ecInfo, prm)
-				return false
-			} else {
-				if !client.IsErrObjectNotFound(err) {
-					e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
-				}
-				return false
-			}
-		} else if !resExists.Exists() {
-			return false
-		}
-
-		var shPrm shard.InhumePrm
-		shPrm.MarkAsGarbage(prm.addr)
-		if prm.forceRemoval {
-			shPrm.ForceRemoval()
-		}
-
-		_, err = sh.Inhume(ctx, shPrm)
-		if err != nil {
-			e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
-
-			var target *apistatus.ObjectLocked
-			locked.is = errors.As(err, &target)
-
-			return locked.is
-		}
-
-		// If the object is a parent of a split chain, keep iterating to set
-		// the GC mark on every shard.
-		return splitInfo == nil
-	}); err != nil {
-		return err
-	}
-
-	if locked.is {
-		return new(apistatus.ObjectLocked)
-	}
-
-	if splitInfo != nil {
-		return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
-	}
-
-	return nil
-}
-
-func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
-	var fs objectSDK.SearchFilters
-	fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
-
-	var selectPrm shard.SelectPrm
-	selectPrm.SetFilters(fs)
-	selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
-
-	var inhumePrm shard.InhumePrm
-	if force {
-		inhumePrm.ForceRemoval()
-	}
-
-	return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
-		res, err := sh.Select(ctx, selectPrm)
-		if err != nil {
-			e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
-				zap.Stringer("addr", addr),
-				zap.Error(err))
-			return false
-		}
-
-		for _, addr := range res.AddressList() {
-			inhumePrm.MarkAsGarbage(addr)
-
-			_, err = sh.Inhume(ctx, inhumePrm)
-			if err != nil {
-				e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
-					zap.Stringer("addr", addr),
-					zap.Error(err))
-				continue
-			}
-		}
-		return false
-	})
-}
-
-func (e *StorageEngine) deleteChunks(
-	ctx context.Context, sh hashedShard, ecInfo *objectSDK.ECInfo, prm DeletePrm,
-) {
-	var inhumePrm shard.InhumePrm
-	if prm.forceRemoval {
-		inhumePrm.ForceRemoval()
-	}
-	for _, chunk := range ecInfo.Chunks {
-		var addr oid.Address
-		addr.SetContainer(prm.addr.Container())
-		var objID oid.ID
-		err := objID.ReadFromV2(chunk.ID)
-		if err != nil {
-			e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
-			continue
-		}
-		addr.SetObject(objID)
-		inhumePrm.MarkAsGarbage(addr)
-		_, err = sh.Inhume(ctx, inhumePrm)
-		if err != nil {
-			e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
-				zap.Stringer("addr", addr),
-				zap.Error(err))
-			continue
-		}
-	}
-}
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
deleted file mode 100644
index a56598c09..000000000
--- a/pkg/local_object_storage/engine/delete_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package engine
-
-import (
-	"context"
-	"testing"
-
-	
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDeleteBigObject(t *testing.T) { - t.Parallel() - - cnr := cidtest.ID() - parentID := oidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - parent.SetID(parentID) - parent.SetPayload(nil) - - const childCount = 10 - children := make([]*objectSDK.Object, childCount) - childIDs := make([]oid.ID, childCount) - for i := range children { - children[i] = testutil.GenerateObjectWithCID(cnr) - if i != 0 { - children[i].SetPreviousID(childIDs[i-1]) - } - if i == len(children)-1 { - children[i].SetParent(parent) - } - children[i].SetSplitID(splitID) - children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)}) - childIDs[i], _ = children[i].ID() - } - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(parentID) - link.SetSplitID(splitID) - link.SetChildren(childIDs...) - - e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - for i := range children { - require.NoError(t, Put(context.Background(), e, children[i], false)) - } - require.NoError(t, Put(context.Background(), e, link, false)) - - addrParent := object.AddressOf(parent) - checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) - - addrLink := object.AddressOf(link) - checkGetError[error](t, e, addrLink, false) - - for i := range children { - checkGetError[error](t, e, object.AddressOf(children[i]), false) - } - - var deletePrm DeletePrm - deletePrm.WithForceRemoval() - deletePrm.WithAddress(addrParent) - - require.NoError(t, e.Delete(context.Background(), deletePrm)) - - checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) - checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) - for i := range children { - checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true) - } -} - -func TestDeleteBigObjectWithoutGC(t *testing.T) { - t.Parallel() - - cnr := cidtest.ID() - parentID := oidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - parent.SetID(parentID) - parent.SetPayload(nil) - - const childCount = 3 - children := make([]*objectSDK.Object, childCount) - childIDs := make([]oid.ID, childCount) - for i := range children { - children[i] = testutil.GenerateObjectWithCID(cnr) - if i != 0 { - children[i].SetPreviousID(childIDs[i-1]) - } - if i == len(children)-1 { - children[i].SetParent(parent) - } - children[i].SetSplitID(splitID) - children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)}) - childIDs[i], _ = children[i].ID() - } - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(parentID) - link.SetSplitID(splitID) - link.SetChildren(childIDs...) 
- - te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { - return []shard.Option{shard.WithDisabledGC()} - }).prepare(t) - e := te.engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - s1 := te.shards[0] - - for i := range children { - require.NoError(t, Put(context.Background(), e, children[i], false)) - } - require.NoError(t, Put(context.Background(), e, link, false)) - - addrParent := object.AddressOf(parent) - checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) - - addrLink := object.AddressOf(link) - checkGetError[error](t, e, addrLink, false) - - for i := range children { - checkGetError[error](t, e, object.AddressOf(children[i]), false) - } - - // delete logical - var deletePrm DeletePrm - deletePrm.WithForceRemoval() - deletePrm.WithAddress(addrParent) - - require.NoError(t, e.Delete(context.Background(), deletePrm)) - - checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) - checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) - for i := range children { - checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true) - } - - // delete physical - var delPrm shard.DeletePrm - delPrm.SetAddresses(addrParent) - _, err := s1.Delete(context.Background(), delPrm) - require.NoError(t, err) - - delPrm.SetAddresses(addrLink) - _, err = s1.Delete(context.Background(), delPrm) - require.NoError(t, err) - - for i := range children { - delPrm.SetAddresses(object.AddressOf(children[i])) - _, err = s1.Delete(context.Background(), delPrm) - require.NoError(t, err) - } - - checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) - checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) - for i := range children { - checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true) - } -} - -func checkGetError[E error](t *testing.T, e *StorageEngine, addr oid.Address, shouldFail bool) { - var getPrm GetPrm - getPrm.WithAddress(addr) - - _, err := e.Get(context.Background(), getPrm) - if shouldFail { - var target E - require.ErrorAs(t, err, &target) - } else { - require.NoError(t, err) - } -} diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go deleted file mode 100644 index 376d545d3..000000000 --- a/pkg/local_object_storage/engine/engine.go +++ /dev/null @@ -1,282 +0,0 @@ -package engine - -import ( - "context" - "errors" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" -) - -// StorageEngine represents FrostFS local storage engine. 
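-// It dispatches object operations across a set of shards and, when a shard
-// accumulates too many errors (see reportShardError), moves it to read-only
-// or degraded mode in the background.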
-type StorageEngine struct { - *cfg - - removeDuplicatesInProgress atomic.Bool - - mtx sync.RWMutex - - shards map[string]hashedShard - - closeCh chan struct{} - setModeCh chan setModeRequest - wg sync.WaitGroup - - blockExec struct { - mtx sync.RWMutex - closed bool - } - evacuateLimiter *evacuationLimiter -} - -type shardWrapper struct { - errorCount *atomic.Uint32 - *shard.Shard -} - -type setModeRequest struct { - sh *shard.Shard - isMeta bool - errorCount uint32 -} - -// setModeLoop listens setModeCh to perform degraded mode transition of a single shard. -// Instead of creating a worker per single shard we use a single goroutine. -func (e *StorageEngine) setModeLoop(ctx context.Context) { - defer e.wg.Done() - - var ( - mtx sync.RWMutex // protects inProgress map - inProgress = make(map[string]struct{}) - ) - - for { - select { - case <-e.closeCh: - return - case r := <-e.setModeCh: - sid := r.sh.ID().String() - - mtx.Lock() - _, ok := inProgress[sid] - if !ok { - inProgress[sid] = struct{}{} - go func() { - e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta) - - mtx.Lock() - delete(inProgress, sid) - mtx.Unlock() - }() - } - mtx.Unlock() - } - } -} - -func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) { - sid := sh.ID() - log := e.log.With( - zap.Stringer("shard_id", sid), - zap.Uint32("error count", errCount)) - - e.mtx.RLock() - defer e.mtx.RUnlock() - - if isMeta { - err := sh.SetMode(ctx, mode.DegradedReadOnly) - if err == nil { - log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) - return - } - log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, - zap.Error(err)) - } - - err := sh.SetMode(ctx, mode.ReadOnly) - if err != nil { - log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) - return - } - - log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) -} - -// reportShardErrorByID increases shard error counter and logs an error. -func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) { - e.mtx.RLock() - sh, ok := e.shards[id] - e.mtx.RUnlock() - - if !ok { - return - } - - e.reportShardError(ctx, sh, msg, err) -} - -// reportShardError checks that the amount of errors doesn't exceed the configured threshold. -// If it does, shard is set to read-only mode. -func (e *StorageEngine) reportShardError( - ctx context.Context, - sh hashedShard, - msg string, - err error, - fields ...zap.Field, -) { - if isLogical(err) { - e.log.Warn(ctx, msg, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - return - } - - errCount := sh.errorCount.Add(1) - e.metrics.IncErrorCounter(sh.ID().String()) - - sid := sh.ID() - e.log.Warn(ctx, msg, append([]zap.Field{ - zap.Stringer("shard_id", sid), - zap.Uint32("error count", errCount), - zap.Error(err), - }, fields...)...) - - if e.errorsThreshold == 0 || errCount < e.errorsThreshold { - return - } - - req := setModeRequest{ - errorCount: errCount, - sh: sh.Shard, - isMeta: errors.As(err, new(metaerr.Error)), - } - - select { - case e.setModeCh <- req: - default: - // For background workers we can have a lot of such errors, - // thus logging is done with DEBUG level. 
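-		// The send is non-blocking: if setModeLoop is busy, the request
-		// is dropped, and a subsequent error will enqueue it again.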
-		e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
-			zap.Stringer("shard_id", sid),
-			zap.Uint32("error_count", errCount))
-	}
-}
-
-func isLogical(err error) bool {
-	return errors.As(err, &logicerr.Logical{}) ||
-		errors.Is(err, context.Canceled) ||
-		errors.Is(err, context.DeadlineExceeded) ||
-		errors.As(err, new(*apistatus.ResourceExhausted))
-}
-
-// Option represents StorageEngine's constructor option.
-type Option func(*cfg)
-
-type cfg struct {
-	log *logger.Logger
-
-	errorsThreshold uint32
-
-	metrics MetricRegister
-
-	lowMem bool
-
-	containerSource atomic.Pointer[containerSource]
-}
-
-func defaultCfg() *cfg {
-	res := &cfg{
-		log:     logger.NewLoggerWrapper(zap.L()),
-		metrics: noopMetrics{},
-	}
-	res.containerSource.Store(&containerSource{})
-	return res
-}
-
-// New creates, initializes and returns a new StorageEngine instance.
-func New(opts ...Option) *StorageEngine {
-	c := defaultCfg()
-
-	for i := range opts {
-		opts[i](c)
-	}
-
-	evLimMtx := &sync.RWMutex{}
-	evLimCond := sync.NewCond(evLimMtx)
-
-	return &StorageEngine{
-		cfg:       c,
-		shards:    make(map[string]hashedShard),
-		closeCh:   make(chan struct{}),
-		setModeCh: make(chan setModeRequest),
-		evacuateLimiter: &evacuationLimiter{
-			guard:      evLimMtx,
-			statusCond: evLimCond,
-		},
-	}
-}
-
-// WithLogger returns an option to set StorageEngine's logger.
-func WithLogger(l *logger.Logger) Option {
-	return func(c *cfg) {
-		c.log = l
-	}
-}
-
-func WithMetrics(v MetricRegister) Option {
-	return func(c *cfg) {
-		c.metrics = v
-	}
-}
-
-// WithErrorThreshold returns an option to specify the number of errors after
-// which a shard is moved to read-only mode.
-func WithErrorThreshold(sz uint32) Option {
-	return func(c *cfg) {
-		c.errorsThreshold = sz
-	}
-}
-
-// WithLowMemoryConsumption returns an option to reduce memory consumption
-// at the cost of performance.
-func WithLowMemoryConsumption(lowMemCons bool) Option {
-	return func(c *cfg) {
-		c.lowMem = lowMemCons
-	}
-}
-
-// SetContainerSource sets the container source used for availability checks.
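-// The pointer is stored atomically, so the source can be swapped while the
-// engine is serving requests.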
-func (e *StorageEngine) SetContainerSource(cs container.Source) { - e.containerSource.Store(&containerSource{cs: cs}) -} - -type containerSource struct { - cs container.Source -} - -func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - - if s == nil || s.cs == nil { - return true, nil - } - - wasRemoved, err := container.WasRemoved(ctx, s.cs, id) - if err != nil { - return false, err - } - return !wasRemoved, nil -} diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go deleted file mode 100644 index fc6d9ee9c..000000000 --- a/pkg/local_object_storage/engine/engine_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "path/filepath" - "runtime/debug" - "strings" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -type epochState struct { - currEpoch uint64 -} - -func (s epochState) CurrentEpoch() uint64 { - return s.currEpoch -} - -type testEngineWrapper struct { - engine *StorageEngine - shards []*shard.Shard - shardIDs []*shard.ID -} - -func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper { - opts = append(testGetDefaultEngineOptions(t), opts...) - return &testEngineWrapper{engine: New(opts...)} -} - -func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper { - return te.setShardsNumOpts(t, num, func(_ int) []shard.Option { - return testGetDefaultShardOptions(t) - }) -} - -func (te *testEngineWrapper) setShardsNumOpts( - t testing.TB, num int, shardOpts func(id int) []shard.Option, -) *testEngineWrapper { - te.shards = make([]*shard.Shard, num) - te.shardIDs = make([]*shard.ID, num) - for i := range num { - shard, err := te.engine.createShard(context.Background(), shardOpts(i)) - require.NoError(t, err) - require.NoError(t, te.engine.addShard(shard)) - te.shards[i] = shard - te.shardIDs[i] = shard.ID() - } - require.Len(t, te.engine.shards, num) - return te -} - -func (te *testEngineWrapper) setShardsNumAdditionalOpts( - t testing.TB, num int, shardOpts func(id int) []shard.Option, -) *testEngineWrapper { - return te.setShardsNumOpts(t, num, func(id int) []shard.Option { - return append(testGetDefaultShardOptions(t), shardOpts(id)...) - }) -} - -// prepare calls Open and Init on the created engine. 
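-// Tests typically chain the helpers, e.g.:
-//
-//	e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine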
-func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper { - require.NoError(t, te.engine.Open(context.Background())) - require.NoError(t, te.engine.Init(context.Background())) - return te -} - -func testGetDefaultEngineOptions(t testing.TB) []Option { - return []Option{ - WithLogger(test.NewLogger(t)), - } -} - -func testGetDefaultShardOptions(t testing.TB) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages( - newStorages(t, t.TempDir(), 1<<20)), - blobstor.WithLogger(test.NewLogger(t)), - ), - shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), - shard.WithLimiter(&testQoSLimiter{t: t}), - } -} - -func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option { - return []meta.Option{ - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - meta.WithLogger(test.NewLogger(t)), - } -} - -func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage { - return []blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(1), - blobovniczatree.WithBlobovniczaShallowWidth(1), - blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < smallSize - }, - }, - { - Storage: fstree.New( - fstree.WithPath(root), - fstree.WithDepth(1), - fstree.WithLogger(test.NewLogger(t))), - }, - } -} - -func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *teststore.TestStore, *teststore.TestStore) { - smallFileStorage := teststore.New( - teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(1), - blobovniczatree.WithBlobovniczaShallowWidth(1), - blobovniczatree.WithPermissions(0o700)), - )) - largeFileStorage := teststore.New( - teststore.WithSubstorage(fstree.New( - fstree.WithPath(root), - fstree.WithDepth(1)), - )) - return []blobstor.SubStorage{ - { - Storage: smallFileStorage, - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < smallSize - }, - }, - { - Storage: largeFileStorage, - }, - }, smallFileStorage, largeFileStorage -} - -var _ qos.Limiter = (*testQoSLimiter)(nil) - -type testQoSLimiter struct { - t testing.TB - quard sync.Mutex - id int64 - readStacks map[int64][]byte - writeStacks map[int64][]byte -} - -func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} - -func (t *testQoSLimiter) Close() { - t.quard.Lock() - defer t.quard.Unlock() - - var sb strings.Builder - var seqN int - for _, stack := range t.readStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) - } - for _, stack := range t.writeStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) - } - require.True(t.t, seqN == 0, sb.String()) -} - -func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - 
t.id++ - id := t.id - - if t.readStacks == nil { - t.readStacks = make(map[int64][]byte) - } - t.readStacks[id] = stack - - return func() { - t.quard.Lock() - defer t.quard.Unlock() - - delete(t.readStacks, id) - }, nil -} - -func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - t.id++ - id := t.id - - if t.writeStacks == nil { - t.writeStacks = make(map[int64][]byte) - } - t.writeStacks[id] = stack - - return func() { - t.quard.Lock() - defer t.quard.Unlock() - - delete(t.writeStacks, id) - }, nil -} - -func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go deleted file mode 100644 index 57029dd5f..000000000 --- a/pkg/local_object_storage/engine/error_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strconv" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -const errSmallSize = 256 - -type testEngine struct { - ng *StorageEngine - dir string - shards [2]*testShard -} - -type testShard struct { - id *shard.ID - smallFileStorage *teststore.TestStore - largeFileStorage *teststore.TestStore -} - -func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) *testEngine { - if dir == "" { - dir = t.TempDir() - } - - var testShards [2]*testShard - - te := testNewEngine(t, - WithErrorThreshold(errThreshold), - ). 
- setShardsNumOpts(t, 2, func(id int) []shard.Option { - storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(id)), errSmallSize) - testShards[id] = &testShard{ - smallFileStorage: smallFileStorage, - largeFileStorage: largeFileStorage, - } - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions(blobstor.WithStorages(storages)), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - ), - shard.WithPiloramaOptions( - pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))), - pilorama.WithPerm(0o700)), - } - }).prepare(t) - e := te.engine - - for i, id := range te.shardIDs { - testShards[i].id = id - } - - return &testEngine{ - ng: e, - dir: dir, - shards: testShards, - } -} - -func TestErrorReporting(t *testing.T) { - t.Run("ignore errors by default", func(t *testing.T) { - te := newEngineWithErrorThreshold(t, "", 0) - - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - obj.SetPayload(make([]byte, errSmallSize)) - - var prm shard.PutPrm - prm.SetObject(obj) - te.ng.mtx.RLock() - _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) - te.ng.mtx.RUnlock() - require.NoError(t, err) - - _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) - require.NoError(t, err) - - checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite) - checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) - - for _, shard := range te.shards { - shard.largeFileStorage.SetOption(teststore.WithGet(func(common.GetPrm) (common.GetRes, error) { - return common.GetRes{}, teststore.ErrDiskExploded - })) - } - - for i := uint32(1); i < 3; i++ { - _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) - require.Error(t, err) - checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite) - checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) - } - require.NoError(t, te.ng.Close(context.Background())) - }) - t.Run("with error threshold", func(t *testing.T) { - const errThreshold = 3 - - te := newEngineWithErrorThreshold(t, "", errThreshold) - - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - obj.SetPayload(make([]byte, errSmallSize)) - - var prm shard.PutPrm - prm.SetObject(obj) - te.ng.mtx.RLock() - _, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm) - te.ng.mtx.RUnlock() - require.NoError(t, err) - - _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) - require.NoError(t, err) - - checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite) - checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) - - for _, shard := range te.shards { - shard.largeFileStorage.SetOption(teststore.WithGet(func(common.GetPrm) (common.GetRes, error) { - return common.GetRes{}, teststore.ErrDiskExploded - })) - } - - for i := uint32(1); i < errThreshold; i++ { - _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) - require.Error(t, err) - checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite) - checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) - } - - for i := range uint32(2) { - _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) - require.Error(t, err) - checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly) - checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) - } - - 
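-		// SetShardMode with resetErrorCounter=false keeps the accumulated
-		// error counter; passing true clears it (both cases checked below).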
require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false))
-		checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite)
-
-		require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true))
-		checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
-		require.NoError(t, te.ng.Close(context.Background()))
-	})
-}
-
-func TestBlobstorFailback(t *testing.T) {
-	dir := t.TempDir()
-
-	te := newEngineWithErrorThreshold(t, dir, 1)
-
-	objs := make([]*objectSDK.Object, 0, 2)
-	for _, size := range []int{15, errSmallSize + 1} {
-		obj := testutil.GenerateObjectWithCID(cidtest.ID())
-		obj.SetPayload(make([]byte, size))
-
-		var prm shard.PutPrm
-		prm.SetObject(obj)
-		te.ng.mtx.RLock()
-		_, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
-		te.ng.mtx.RUnlock()
-		require.NoError(t, err)
-		objs = append(objs, obj)
-	}
-
-	for i := range objs {
-		addr := object.AddressOf(objs[i])
-		_, err := te.ng.Get(context.Background(), GetPrm{addr: addr})
-		require.NoError(t, err)
-		_, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr})
-		require.NoError(t, err)
-	}
-
-	checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
-	require.NoError(t, te.ng.Close(context.Background()))
-
-	p1 := te.ng.shards[te.shards[0].id.String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
-	p2 := te.ng.shards[te.shards[1].id.String()].Shard.DumpInfo().BlobStorInfo.SubStorages[1].Path
-	tmp := filepath.Join(dir, "tmp")
-	require.NoError(t, os.Rename(p1, tmp))
-	require.NoError(t, os.Rename(p2, p1))
-	require.NoError(t, os.Rename(tmp, p2))
-
-	te = newEngineWithErrorThreshold(t, dir, 1)
-
-	for i := range objs {
-		addr := object.AddressOf(objs[i])
-		getRes, err := te.ng.Get(context.Background(), GetPrm{addr: addr})
-		require.NoError(t, err)
-		require.Equal(t, objs[i], getRes.Object())
-
-		rngRes, err := te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: 1, ln: 10})
-		require.NoError(t, err)
-		require.Equal(t, objs[i].Payload()[1:11], rngRes.Object().Payload())
-
-		_, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: errSmallSize + 10, ln: 1})
-		require.True(t, shard.IsErrOutOfRange(err))
-	}
-
-	checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
-	checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
-	require.NoError(t, te.ng.Close(context.Background()))
-}
-
-func checkShardState(t *testing.T, e *StorageEngine, id *shard.ID, errCount uint32, mode mode.Mode) {
-	e.mtx.RLock()
-	sh := e.shards[id.String()]
-	e.mtx.RUnlock()
-
-	require.Eventually(t, func() bool {
-		return errCount == sh.errorCount.Load() &&
-			mode == sh.GetMode()
-	}, 10*time.Second, 10*time.Millisecond, "shard didn't reach the expected error count and mode within 10 seconds")
-}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
deleted file mode 100644
index c08dfbf03..000000000
--- a/pkg/local_object_storage/engine/evacuate.go
+++ /dev/null
@@ -1,863 +0,0 @@
-package engine
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"slices"
-	"strings"
-	"sync"
-	"sync/atomic"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/hrw" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -const ( - // containerWorkerCountDefault is a default value of the count of - // concurrent container evacuation workers. - containerWorkerCountDefault = 10 - // objectWorkerCountDefault is a default value of the count of - // concurrent object evacuation workers. - objectWorkerCountDefault = 10 -) - -var ( - ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode") - - evacuationOperationLogField = zap.String("operation", "evacuation") -) - -// EvacuateScope is an evacuation scope. Keep in sync with pkg/services/control/service.proto. -type EvacuateScope uint32 - -var ( - EvacuateScopeObjects EvacuateScope = 1 - EvacuateScopeTrees EvacuateScope = 2 -) - -func (s EvacuateScope) String() string { - var sb strings.Builder - first := true - if s&EvacuateScopeObjects == EvacuateScopeObjects { - sb.WriteString("objects") - first = false - } - if s&EvacuateScopeTrees == EvacuateScopeTrees { - if !first { - sb.WriteString(";") - } - sb.WriteString("trees") - } - return sb.String() -} - -func (s EvacuateScope) WithObjects() bool { - return s&EvacuateScopeObjects == EvacuateScopeObjects -} - -func (s EvacuateScope) WithTrees() bool { - return s&EvacuateScopeTrees == EvacuateScopeTrees -} - -func (s EvacuateScope) TreesOnly() bool { - return s == EvacuateScopeTrees -} - -// EvacuateShardPrm represents parameters for the EvacuateShard operation. -type EvacuateShardPrm struct { - ShardID []*shard.ID - ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error) - TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error) - IgnoreErrors bool - Scope EvacuateScope - RepOneOnly bool - - ContainerWorkerCount uint32 - ObjectWorkerCount uint32 -} - -// EvacuateShardRes represents result of the EvacuateShard operation. -type EvacuateShardRes struct { - objEvacuated *atomic.Uint64 - objTotal *atomic.Uint64 - objFailed *atomic.Uint64 - objSkipped *atomic.Uint64 - - trEvacuated *atomic.Uint64 - trTotal *atomic.Uint64 - trFailed *atomic.Uint64 -} - -// NewEvacuateShardRes creates new EvacuateShardRes instance. -func NewEvacuateShardRes() *EvacuateShardRes { - return &EvacuateShardRes{ - objEvacuated: new(atomic.Uint64), - objTotal: new(atomic.Uint64), - objFailed: new(atomic.Uint64), - objSkipped: new(atomic.Uint64), - trEvacuated: new(atomic.Uint64), - trTotal: new(atomic.Uint64), - trFailed: new(atomic.Uint64), - } -} - -// ObjectsEvacuated returns amount of evacuated objects. -// Objects for which handler returned no error are also assumed evacuated. -func (p *EvacuateShardRes) ObjectsEvacuated() uint64 { - if p == nil { - return 0 - } - return p.objEvacuated.Load() -} - -// ObjectsTotal returns total count objects to evacuate. -func (p *EvacuateShardRes) ObjectsTotal() uint64 { - if p == nil { - return 0 - } - return p.objTotal.Load() -} - -// ObjectsFailed returns count of failed objects to evacuate. 
-func (p *EvacuateShardRes) ObjectsFailed() uint64 { - if p == nil { - return 0 - } - return p.objFailed.Load() -} - -// ObjectsSkipped returns count of skipped objects. -func (p *EvacuateShardRes) ObjectsSkipped() uint64 { - if p == nil { - return 0 - } - return p.objSkipped.Load() -} - -// TreesEvacuated returns amount of evacuated trees. -func (p *EvacuateShardRes) TreesEvacuated() uint64 { - if p == nil { - return 0 - } - return p.trEvacuated.Load() -} - -// TreesTotal returns total count trees to evacuate. -func (p *EvacuateShardRes) TreesTotal() uint64 { - if p == nil { - return 0 - } - return p.trTotal.Load() -} - -// TreesFailed returns count of failed trees to evacuate. -func (p *EvacuateShardRes) TreesFailed() uint64 { - if p == nil { - return 0 - } - return p.trFailed.Load() -} - -// DeepCopy returns deep copy of result instance. -func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { - if p == nil { - return nil - } - - res := &EvacuateShardRes{ - objEvacuated: new(atomic.Uint64), - objTotal: new(atomic.Uint64), - objFailed: new(atomic.Uint64), - objSkipped: new(atomic.Uint64), - trEvacuated: new(atomic.Uint64), - trTotal: new(atomic.Uint64), - trFailed: new(atomic.Uint64), - } - - res.objEvacuated.Store(p.objEvacuated.Load()) - res.objTotal.Store(p.objTotal.Load()) - res.objFailed.Store(p.objFailed.Load()) - res.objSkipped.Store(p.objSkipped.Load()) - res.trTotal.Store(p.trTotal.Load()) - res.trEvacuated.Store(p.trEvacuated.Load()) - res.trFailed.Store(p.trFailed.Load()) - return res -} - -var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") - -// Evacuate moves data from one shard to the others. -// The shard being moved must be in read-only mode. -func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - shardIDs := make([]string, len(prm.ShardID)) - for i := range prm.ShardID { - shardIDs[i] = prm.ShardID[i].String() - } - - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate", - trace.WithAttributes( - attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("ignoreErrors", prm.IgnoreErrors), - attribute.Stringer("scope", prm.Scope), - )) - defer span.End() - - shards, err := e.getActualShards(shardIDs, prm) - if err != nil { - return err - } - - shardsToEvacuate := make(map[string]*shard.Shard) - for i := range shardIDs { - for j := range shards { - if shards[j].ID().String() == shardIDs[i] { - shardsToEvacuate[shardIDs[i]] = shards[j].Shard - } - } - } - - res := NewEvacuateShardRes() - ctx = context.WithoutCancel(ctx) - eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) - if err != nil { - return err - } - - var mtx sync.RWMutex - copyShards := func() []hashedShard { - mtx.RLock() - defer mtx.RUnlock() - t := slices.Clone(shards) - return t - } - eg.Go(func() error { - return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate) - }) - - return nil -} - -func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, -) error { - var err error - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", - trace.WithAttributes( - attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("ignoreErrors", prm.IgnoreErrors), - attribute.Stringer("scope", prm.Scope), - attribute.Bool("repOneOnly", prm.RepOneOnly), - )) - - defer func() { - span.End() 
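-		// Completing the limiter records the final status and releases the
-		// single-flight slot, allowing a new evacuation to start.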
- e.evacuateLimiter.Complete(err) - }() - - e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) - - err = e.getTotals(ctx, prm, shardsToEvacuate, res) - if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) - return err - } - - ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm) - continueLoop := true - for i := 0; continueLoop && i < len(shardIDs); i++ { - select { - case <-ctx.Done(): - continueLoop = false - default: - egShard.Go(func() error { - err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject) - if err != nil { - cancel(err) - } - return err - }) - } - } - err = egShard.Wait() - if err != nil { - err = fmt.Errorf("shard error: %w", err) - } - errContainer := egContainer.Wait() - errObject := egObject.Wait() - if errContainer != nil { - err = errors.Join(err, fmt.Errorf("container error: %w", errContainer)) - } - if errObject != nil { - err = errors.Join(err, fmt.Errorf("object error: %w", errObject)) - } - if err != nil { - e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) - return err - } - - e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation, - zap.Strings("shard_ids", shardIDs), - evacuationOperationLogField, - zap.Uint64("total_objects", res.ObjectsTotal()), - zap.Uint64("evacuated_objects", res.ObjectsEvacuated()), - zap.Uint64("failed_objects", res.ObjectsFailed()), - zap.Uint64("skipped_objects", res.ObjectsSkipped()), - zap.Uint64("total_trees", res.TreesTotal()), - zap.Uint64("evacuated_trees", res.TreesEvacuated()), - zap.Uint64("failed_trees", res.TreesFailed()), - ) - return nil -} - -func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) ( - context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group, -) { - operationCtx, cancel := context.WithCancelCause(ctx) - egObject, _ := errgroup.WithContext(operationCtx) - objectWorkerCount := prm.ObjectWorkerCount - if objectWorkerCount == 0 { - objectWorkerCount = objectWorkerCountDefault - } - egObject.SetLimit(int(objectWorkerCount)) - egContainer, _ := errgroup.WithContext(operationCtx) - containerWorkerCount := prm.ContainerWorkerCount - if containerWorkerCount == 0 { - containerWorkerCount = containerWorkerCountDefault - } - egContainer.SetLimit(int(containerWorkerCount)) - egShard, _ := errgroup.WithContext(operationCtx) - - return operationCtx, cancel, egShard, egContainer, egObject -} - -func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals") - defer span.End() - - for _, sh := range shardsToEvacuate { - if prm.Scope.WithObjects() { - cnt, err := sh.LogicalObjectsCount(ctx) - if err != nil { - if errors.Is(err, shard.ErrDegradedMode) { - continue - } - return err - } - res.objTotal.Add(cnt) - } - if prm.Scope.WithTrees() && sh.PiloramaEnabled() { - cnt, err := pilorama.TreeCountAll(ctx, sh) - if err != nil { - return err - } - res.trTotal.Add(cnt) - } - } - return nil -} - -func (e *StorageEngine) 
evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, - egContainer *errgroup.Group, egObject *errgroup.Group, -) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", - trace.WithAttributes( - attribute.String("shardID", shardID), - )) - defer span.End() - - if prm.Scope.WithObjects() { - if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil { - return err - } - } - if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() { - if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { - return err - } - } - - return nil -} - -func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, - egContainer *errgroup.Group, egObject *errgroup.Group, -) error { - sh := shardsToEvacuate[shardID] - var cntPrm shard.IterateOverContainersPrm - cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error { - select { - case <-ctx.Done(): - return context.Cause(ctx) - default: - } - egContainer.Go(func() error { - var skip bool - c, err := e.containerSource.Load().cs.Get(ctx, cnt) - if err != nil { - if client.IsErrContainerNotFound(err) { - skip = true - } else { - return err - } - } - if !skip && prm.RepOneOnly { - skip = e.isNotRepOne(c) - } - if skip { - countPrm := shard.CountAliveObjectsInContainerPrm{ - ObjectType: objType, - ContainerID: cnt, - } - count, err := sh.CountAliveObjectsInContainer(ctx, countPrm) - if err != nil { - return err - } - res.objSkipped.Add(count) - return nil - } - var objPrm shard.IterateOverObjectsInContainerPrm - objPrm.ObjectType = objType - objPrm.ContainerID = cnt - objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error { - select { - case <-ctx.Done(): - return context.Cause(ctx) - default: - } - egObject.Go(func() error { - err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value) - if err != nil { - cancel(err) - } - return err - }) - return nil - } - err = sh.IterateOverObjectsInContainer(ctx, objPrm) - if err != nil { - cancel(err) - } - return err - }) - return nil - } - - sh.SetEvacuationInProgress(true) - err := sh.IterateOverContainers(ctx, cntPrm) - if err != nil { - cancel(err) - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField) - } - return err -} - -func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, -) error { - sh := shardsToEvacuate[shardID] - shards := getShards() - - var listPrm pilorama.TreeListTreesPrm - first := true - - for len(listPrm.NextPageToken) > 0 || first { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - first = false - - listRes, err := sh.TreeListTrees(ctx, listPrm) - if err != nil { - return err - } - listPrm.NextPageToken = listRes.NextPageToken - if err := e.evacuateTrees(ctx, sh, listRes.Items, prm, res, shards, shardsToEvacuate); err != nil { - return err - } - } - return nil -} - -func (e *StorageEngine) evacuateTrees(ctx 
context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, -) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees", - trace.WithAttributes( - attribute.Int("trees_count", len(trees)), - )) - defer span.End() - - for _, contTree := range trees { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - success, shardID, err := e.tryEvacuateTreeLocal(ctx, sh, contTree, prm, shards, shardsToEvacuate) - if err != nil { - return err - } - if success { - e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, - zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), - zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), - evacuationOperationLogField) - res.trEvacuated.Add(1) - continue - } - - moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm) - if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, - zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), - zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) - return err - } - if moved { - e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, - zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), - zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), - evacuationOperationLogField) - res.trEvacuated.Add(1) - } else if prm.IgnoreErrors { - res.trFailed.Add(1) - e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, - zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), - zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) - } else { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, - zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), - zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) - return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID) - } - } - return nil -} - -func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) { - if prm.TreeHandler == nil { - return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) - } - - return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh) -} - -func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, -) (bool, string, error) { - target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate) - if err != nil { - return false, "", err - } - if !found { - return false, "", nil - } - const readBatchSize = 1000 - source := make(chan *pilorama.Move, readBatchSize) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - var wg sync.WaitGroup - - wg.Add(1) - var applyErr error - go func() { - defer wg.Done() - - applyErr = target.TreeApplyStream(ctx, tree.CID, tree.TreeID, source) - if applyErr != nil { - cancel() - } - }() - - var height uint64 - for { - op, err := 
sh.TreeGetOpLog(ctx, tree.CID, tree.TreeID, height)
-		if err != nil {
-			cancel()
-			wg.Wait()
-			close(source) // close after cancel, so that ctx.Done() is observed first
-			if prm.IgnoreErrors {
-				return false, "", nil
-			}
-			return false, "", err
-		}
-
-		if op.Time == 0 { // the op log has been fully read
-			close(source)
-			wg.Wait()
-			if applyErr == nil {
-				return true, target.ID().String(), nil
-			}
-			if prm.IgnoreErrors {
-				return false, "", nil
-			}
-			return false, "", applyErr
-		}
-
-		select {
-		case <-ctx.Done(): // apply stream failed or operation cancelled
-			wg.Wait()
-			if prm.IgnoreErrors {
-				return false, "", nil
-			}
-			if applyErr != nil {
-				return false, "", applyErr
-			}
-			return false, "", ctx.Err()
-		case source <- &op:
-		}
-
-		height = op.Time + 1
-	}
-}
-
-// findShardToEvacuateTree returns the first shard where the tree already
-// exists, if any; otherwise, the first suitable shard in HRW order.
-func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
-	shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) (hashedShard, bool, error) {
-	hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
-	var result hashedShard
-	var found bool
-	for _, target := range shards {
-		select {
-		case <-ctx.Done():
-			return hashedShard{}, false, ctx.Err()
-		default:
-		}
-
-		if _, ok := shardsToEvacuate[target.ID().String()]; ok {
-			continue
-		}
-
-		if !target.PiloramaEnabled() || target.GetMode().ReadOnly() {
-			continue
-		}
-
-		if !found {
-			result = target
-			found = true
-		}
-
-		exists, err := target.TreeExists(ctx, tree.CID, tree.TreeID)
-		if err != nil {
-			continue
-		}
-		if exists {
-			return target, true, nil
-		}
-	}
-	return result, found, nil
-}
-
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
-	e.mtx.RLock()
-	defer e.mtx.RUnlock()
-
-	for i := range shardIDs {
-		sh, ok := e.shards[shardIDs[i]]
-		if !ok {
-			return nil, errShardNotFound
-		}
-
-		if !sh.GetMode().ReadOnly() {
-			return nil, ErrMustBeReadOnly
-		}
-
-		if prm.Scope.TreesOnly() && !sh.PiloramaEnabled() {
-			return nil, fmt.Errorf("shard %s doesn't have pilorama enabled", sh.ID())
-		}
-	}
-
-	if len(e.shards)-len(shardIDs) < 1 && prm.ObjectsHandler == nil && prm.Scope.WithObjects() {
-		return nil, errMustHaveTwoShards
-	}
-
-	if len(e.shards)-len(shardIDs) < 1 && prm.TreeHandler == nil && prm.Scope.WithTrees() {
-		return nil, errMustHaveTwoShards
-	}
-
-	// We must return all shards to have correct information about their
-	// indexes in the sorted slice and to set appropriate marks in the metabase.
-	// Shards being evacuated are skipped during put.
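-	// (The same HRW ranking is applied per object: tryEvacuateObjectLocal
-	// sorts this slice with hrw.SortHasherSliceByValue before picking a
-	// target shard.)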
- shards := make([]hashedShard, 0, len(e.shards)) - for id := range e.shards { - shards = append(shards, e.shards[id]) - } - return shards, nil -} - -func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, -) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") - defer span.End() - - select { - case <-ctx.Done(): - return context.Cause(ctx) - default: - } - - shards := getShards() - addr := objInfo.Address - - var getPrm shard.GetPrm - getPrm.SetAddress(addr) - getPrm.SkipEvacCheck(true) - - getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm) - if err != nil { - if prm.IgnoreErrors { - res.objFailed.Add(1) - return nil - } - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) - return err - } - - evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr) - if err != nil { - return err - } - - if evacuatedLocal { - return nil - } - - if prm.ObjectsHandler == nil { - // Do not check ignoreErrors flag here because - // ignoring errors on put make this command kinda useless. - return fmt.Errorf("%w: %s", errPutShard, objInfo) - } - - moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) - if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) - return err - } - if moved { - res.objEvacuated.Add(1) - } else if prm.IgnoreErrors { - res.objFailed.Add(1) - e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) - } else { - return fmt.Errorf("object %s was not replicated", addr) - } - return nil -} - -func (e *StorageEngine) isNotRepOne(c *container.Container) bool { - p := c.Value.PlacementPolicy() - for i := range p.NumberOfReplicas() { - if p.ReplicaDescriptor(i).NumberOfObjects() > 1 { - return true - } - } - return false -} - -func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, -) (bool, error) { - hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) - for j := range shards { - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - - if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { - continue - } - switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status { - case putToShardSuccess: - res.objEvacuated.Add(1) - e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, - zap.Stringer("from", sh.ID()), - zap.Stringer("to", shards[j].ID()), - zap.Stringer("addr", addr), - evacuationOperationLogField) - return true, nil - case putToShardExists, putToShardRemoved: - res.objSkipped.Add(1) - return true, nil - default: - continue - } - } - - return false, nil -} - -func (e *StorageEngine) GetEvacuationState(ctx context.Context) (*EvacuationState, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - return e.evacuateLimiter.GetState(), nil -} - -func (e *StorageEngine) 
EnqueRunningEvacuationStop(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - return e.evacuateLimiter.CancelIfRunning() -} - -func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - return e.evacuateLimiter.ResetEvacuationStatus() -} - -func (e *StorageEngine) ResetEvacuationStatusForShards() { - e.mtx.RLock() - defer e.mtx.RUnlock() - for _, sh := range e.shards { - sh.SetEvacuationInProgress(false) - } -} diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go deleted file mode 100644 index b75e8686d..000000000 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ /dev/null @@ -1,223 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "slices" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "golang.org/x/sync/errgroup" -) - -type EvacuateProcessState int - -const ( - EvacuateProcessStateUndefined EvacuateProcessState = iota - EvacuateProcessStateRunning - EvacuateProcessStateCompleted -) - -type EvacuationState struct { - shardIDs []string - processState EvacuateProcessState - startedAt time.Time - finishedAt time.Time - result *EvacuateShardRes - errMessage string -} - -func (s *EvacuationState) ShardIDs() []string { - if s == nil { - return nil - } - return s.shardIDs -} - -func (s *EvacuationState) ObjectsEvacuated() uint64 { - if s == nil { - return 0 - } - return s.result.ObjectsEvacuated() -} - -func (s *EvacuationState) ObjectsTotal() uint64 { - if s == nil { - return 0 - } - return s.result.ObjectsTotal() -} - -func (s *EvacuationState) ObjectsFailed() uint64 { - if s == nil { - return 0 - } - return s.result.ObjectsFailed() -} - -func (s *EvacuationState) ObjectsSkipped() uint64 { - if s == nil { - return 0 - } - return s.result.ObjectsSkipped() -} - -func (s *EvacuationState) TreesEvacuated() uint64 { - if s == nil { - return 0 - } - return s.result.TreesEvacuated() -} - -func (s *EvacuationState) TreesTotal() uint64 { - if s == nil { - return 0 - } - return s.result.TreesTotal() -} - -func (s *EvacuationState) TreesFailed() uint64 { - if s == nil { - return 0 - } - return s.result.TreesFailed() -} - -func (s *EvacuationState) ProcessingStatus() EvacuateProcessState { - if s == nil { - return EvacuateProcessStateUndefined - } - return s.processState -} - -func (s *EvacuationState) StartedAt() *time.Time { - if s == nil { - return nil - } - if s.startedAt.IsZero() { - return nil - } - return &s.startedAt -} - -func (s *EvacuationState) FinishedAt() *time.Time { - if s == nil { - return nil - } - if s.finishedAt.IsZero() { - return nil - } - return &s.finishedAt -} - -func (s *EvacuationState) ErrorMessage() string { - if s == nil { - return "" - } - return s.errMessage -} - -func (s *EvacuationState) DeepCopy() *EvacuationState { - if s == nil { - return nil - } - shardIDs := slices.Clone(s.shardIDs) - - return &EvacuationState{ - shardIDs: shardIDs, - processState: s.processState, - startedAt: s.startedAt, - finishedAt: s.finishedAt, - errMessage: s.errMessage, - result: s.result.DeepCopy(), - } -} - -type evacuationLimiter struct { - state EvacuationState - eg *errgroup.Group - cancel context.CancelFunc - - guard *sync.RWMutex - statusCond *sync.Cond // used in unit tests -} - -func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, 
context.Context, error) {
-	l.guard.Lock()
-	defer l.guard.Unlock()
-
-	select {
-	case <-ctx.Done():
-		return nil, nil, ctx.Err()
-	default:
-	}
-
-	if l.state.processState == EvacuateProcessStateRunning {
-		return nil, nil, logicerr.New(fmt.Sprintf("evacuate is already running for shard ids %v", l.state.shardIDs))
-	}
-
-	var egCtx context.Context
-	egCtx, l.cancel = context.WithCancel(ctx)
-	l.eg, egCtx = errgroup.WithContext(egCtx)
-	l.state = EvacuationState{
-		shardIDs:     shardIDs,
-		processState: EvacuateProcessStateRunning,
-		startedAt:    time.Now().UTC(),
-		result:       result,
-	}
-	l.statusCond.Broadcast()
-
-	return l.eg, egCtx, nil
-}
-
-func (l *evacuationLimiter) Complete(err error) {
-	l.guard.Lock()
-	defer l.guard.Unlock()
-
-	errMsg := ""
-	if err != nil {
-		errMsg = err.Error()
-	}
-	l.state.processState = EvacuateProcessStateCompleted
-	l.state.errMessage = errMsg
-	l.state.finishedAt = time.Now().UTC()
-	l.statusCond.Broadcast()
-
-	l.eg = nil
-}
-
-func (l *evacuationLimiter) GetState() *EvacuationState {
-	l.guard.RLock()
-	defer l.guard.RUnlock()
-
-	return l.state.DeepCopy()
-}
-
-func (l *evacuationLimiter) CancelIfRunning() error {
-	l.guard.Lock()
-	defer l.guard.Unlock()
-
-	if l.state.processState != EvacuateProcessStateRunning {
-		return logicerr.New("there is no running evacuation task")
-	}
-
-	l.cancel()
-	return nil
-}
-
-func (l *evacuationLimiter) ResetEvacuationStatus() error {
-	l.guard.Lock()
-	defer l.guard.Unlock()
-
-	if l.state.processState == EvacuateProcessStateRunning {
-		return logicerr.New("there is a running evacuation task")
-	}
-
-	l.state = EvacuationState{}
-	l.eg = nil
-	l.cancel = nil
-	l.statusCond.Broadcast()
-
-	return nil
-}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
deleted file mode 100644
index f2ba7d994..000000000
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ /dev/null
@@ -1,827 +0,0 @@
-package engine
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"path/filepath"
-	"strconv"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-
-	coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sync/errgroup"
-)
-
-type containerStorage struct {
-	cntmap  map[cid.ID]*container.Container
-	latency time.Duration
-}
-
-func (cs 
*containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) { - time.Sleep(cs.latency) - v, ok := cs.cntmap[id] - if !ok { - return nil, new(apistatus.ContainerNotFound) - } - coreCnt := coreContainer.Container{ - Value: *v, - } - return &coreCnt, nil -} - -func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { - return nil, nil -} - -func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) { - dir := t.TempDir() - - te := testNewEngine(t). - setShardsNumOpts(t, shardNum, func(id int) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages([]blobstor.SubStorage{{ - Storage: fstree.New( - fstree.WithPath(filepath.Join(dir, strconv.Itoa(id))), - fstree.WithDepth(1)), - }})), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{})), - shard.WithPiloramaOptions( - pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))), - pilorama.WithPerm(0o700), - ), - } - }). - prepare(t) - e, ids := te.engine, te.shardIDs - - objects := make([]*objectSDK.Object, 0, objPerShard*len(ids)) - treeID := "version" - meta := []pilorama.KeyValue{ - {Key: pilorama.AttributeVersion, Value: []byte("XXX")}, - {Key: pilorama.AttributeFilename, Value: []byte("file.txt")}, - } - cnrMap := make(map[cid.ID]*container.Container) - for _, sh := range ids { - for i := range objPerShard { - // Create dummy container - cnr1 := container.Container{} - cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i)) - contID := cidtest.ID() - cnrMap[contID] = &cnr1 - - obj := testutil.GenerateObjectWithCID(contID) - objects = append(objects, obj) - - var putPrm shard.PutPrm - putPrm.SetObject(obj) - _, err := e.shards[sh.String()].Put(context.Background(), putPrm) - require.NoError(t, err) - - _, err = e.shards[sh.String()].TreeAddByPath(context.Background(), pilorama.CIDDescriptor{CID: contID, Position: 0, Size: 1}, - treeID, pilorama.AttributeFilename, []string{"path", "to", "the", "file"}, meta) - require.NoError(t, err) - } - } - e.SetContainerSource(&containerStorage{cntmap: cnrMap}) - return e, ids, objects -} - -func TestEvacuateShardObjects(t *testing.T) { - t.Parallel() - - const objPerShard = 3 - - e, ids, objects := newEngineEvacuate(t, 3, objPerShard) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - evacuateShardID := ids[2].String() - - checkHasObjects := func(t *testing.T) { - for i := range objects { - var prm GetPrm - prm.WithAddress(objectCore.AddressOf(objects[i])) - - _, err := e.Get(context.Background(), prm) - require.NoError(t, err) - } - } - - checkHasObjects(t) - - var prm EvacuateShardPrm - prm.ShardID = ids[2:3] - prm.Scope = EvacuateScopeObjects - - t.Run("must be read-only", func(t *testing.T) { - err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, ErrMustBeReadOnly) - }) - - require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) - - err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - st := testWaitForEvacuationCompleted(t, e) - require.Equal(t, st.ErrorMessage(), "") - require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated()) - - // We check that all objects are available both before and after shard removal. - // First case is a real-world use-case. 
It ensures that an object can be put in presence
-	// of all metabase checks/marks.
-	// Second case ensures that all objects are indeed moved and available.
-	checkHasObjects(t)
-
-	// Objects on evacuated shards should be logically unavailable, but persisted on disk.
-	// This is necessary to prevent the policer from removing them under a `REP 1` policy.
-	for _, obj := range objects[len(objects)-objPerShard:] {
-		var prmGet shard.GetPrm
-		prmGet.SetAddress(objectCore.AddressOf(obj))
-		_, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
-		require.Error(t, err)
-
-		prmGet.SkipEvacCheck(true)
-		_, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
-		require.NoError(t, err)
-
-		var prmHead shard.HeadPrm
-		prmHead.SetAddress(objectCore.AddressOf(obj))
-		_, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
-		require.Error(t, err)
-
-		var existsPrm shard.ExistsPrm
-		existsPrm.Address = objectCore.AddressOf(obj)
-		_, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
-		require.Error(t, err)
-
-		var rngPrm shard.RngPrm
-		rngPrm.SetAddress(objectCore.AddressOf(obj))
-		_, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
-		require.Error(t, err)
-	}
-
-	// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
-	require.NoError(t, e.Evacuate(context.Background(), prm))
-	st = testWaitForEvacuationCompleted(t, e)
-	require.Equal(t, st.ErrorMessage(), "")
-	require.Equal(t, uint64(0), st.ObjectsEvacuated())
-
-	checkHasObjects(t)
-
-	e.mtx.Lock()
-	delete(e.shards, evacuateShardID)
-	e.mtx.Unlock()
-
-	checkHasObjects(t)
-}
-
-func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
-	var st *EvacuationState
-	var err error
-	e.evacuateLimiter.waitForCompleted()
-	st, err = e.GetEvacuationState(context.Background())
-	require.NoError(t, err)
-	require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
-	return st
-}
-
-func TestEvacuateObjectsNetwork(t *testing.T) {
-	t.Parallel()
-
-	errReplication := errors.New("handler error")
-
-	acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
-		var n atomic.Uint64
-		var mtx sync.Mutex
-		return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
-			mtx.Lock()
-			defer mtx.Unlock()
-			if n.Load() == max {
-				return false, errReplication
-			}
-
-			n.Add(1)
-			for i := range objects {
-				if addr == objectCore.AddressOf(objects[i]) {
-					require.Equal(t, objects[i], obj)
-					return true, nil
-				}
-			}
-			require.FailNow(t, "handler was called with an unexpected object: %s", addr)
-			panic("unreachable")
-		}
-	}
-
-	t.Run("single shard", func(t *testing.T) {
-		t.Parallel()
-		e, ids, objects := newEngineEvacuate(t, 1, 3)
-		defer func() {
-			require.NoError(t, e.Close(context.Background()))
-		}()
-
-		evacuateShardID := ids[0].String()
-
-		require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
-
-		var prm EvacuateShardPrm
-		prm.ShardID = ids[0:1]
-		prm.Scope = EvacuateScopeObjects
-
-		err := e.Evacuate(context.Background(), prm)
-		require.ErrorIs(t, err, errMustHaveTwoShards)
-
-		prm.ObjectsHandler = acceptOneOf(objects, 2)
-
-		require.NoError(t, e.Evacuate(context.Background(), prm))
-		st := testWaitForEvacuationCompleted(t, e)
-		require.Contains(t, st.ErrorMessage(), errReplication.Error())
-		require.Equal(t, uint64(2), st.ObjectsEvacuated())
-	})
-	t.Run("multiple shards, 
evacuate one", func(t *testing.T) { - t.Parallel() - e, ids, objects := newEngineEvacuate(t, 2, 3) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - - var prm EvacuateShardPrm - prm.ShardID = ids[1:2] - prm.ObjectsHandler = acceptOneOf(objects, 2) - prm.Scope = EvacuateScopeObjects - - require.NoError(t, e.Evacuate(context.Background(), prm)) - st := testWaitForEvacuationCompleted(t, e) - require.Contains(t, st.ErrorMessage(), errReplication.Error()) - require.Equal(t, uint64(2), st.ObjectsEvacuated()) - - t.Run("no errors", func(t *testing.T) { - prm.ObjectsHandler = acceptOneOf(objects, 3) - - require.NoError(t, e.Evacuate(context.Background(), prm)) - st := testWaitForEvacuationCompleted(t, e) - require.Equal(t, st.ErrorMessage(), "") - require.Equal(t, uint64(3), st.ObjectsEvacuated()) - }) - }) - t.Run("multiple shards, evacuate many", func(t *testing.T) { - t.Parallel() - e, ids, objects := newEngineEvacuate(t, 4, 5) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - evacuateIDs := ids[0:3] - - var totalCount uint64 - for i := range evacuateIDs { - res, err := e.shards[ids[i].String()].List(context.Background()) - require.NoError(t, err) - - totalCount += uint64(len(res.AddressList())) - } - - for i := range ids { - require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly)) - } - - var prm EvacuateShardPrm - prm.ShardID = evacuateIDs - prm.ObjectsHandler = acceptOneOf(objects, totalCount-1) - prm.Scope = EvacuateScopeObjects - - require.NoError(t, e.Evacuate(context.Background(), prm)) - st := testWaitForEvacuationCompleted(t, e) - require.Contains(t, st.ErrorMessage(), errReplication.Error()) - require.Equal(t, totalCount-1, st.ObjectsEvacuated()) - - t.Run("no errors", func(t *testing.T) { - prm.ObjectsHandler = acceptOneOf(objects, totalCount) - - require.NoError(t, e.Evacuate(context.Background(), prm)) - st := testWaitForEvacuationCompleted(t, e) - require.Equal(t, st.ErrorMessage(), "") - require.Equal(t, totalCount, st.ObjectsEvacuated()) - }) - }) -} - -func TestEvacuateCancellation(t *testing.T) { - t.Parallel() - e, ids, _ := newEngineEvacuate(t, 2, 3) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - - var prm EvacuateShardPrm - prm.ShardID = ids[1:2] - prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - return true, nil - } - prm.Scope = EvacuateScopeObjects - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - err := e.Evacuate(ctx, prm) - require.ErrorContains(t, err, "context canceled") -} - -func TestEvacuateCancellationByError(t *testing.T) { - t.Parallel() - e, ids, _ := newEngineEvacuate(t, 2, 10) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - - var prm EvacuateShardPrm - prm.ShardID = ids[1:2] - var once atomic.Bool - 
prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { - var err error - flag := true - if once.CompareAndSwap(false, true) { - err = errors.New("test error") - flag = false - } - return flag, err - } - prm.Scope = EvacuateScopeObjects - prm.ObjectWorkerCount = 2 - prm.ContainerWorkerCount = 2 - - require.NoError(t, e.Evacuate(context.Background(), prm)) - st := testWaitForEvacuationCompleted(t, e) - require.Contains(t, st.ErrorMessage(), "test error") -} - -func TestEvacuateSingleProcess(t *testing.T) { - e, ids, _ := newEngineEvacuate(t, 2, 3) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - - blocker := make(chan any) - running := make(chan any) - - var prm EvacuateShardPrm - prm.ShardID = ids[1:2] - prm.Scope = EvacuateScopeObjects - prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { - select { - case <-running: - default: - close(running) - } - <-blocker - return true, nil - } - - eg, egCtx := errgroup.WithContext(context.Background()) - eg.Go(func() error { - require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") - return nil - }) - eg.Go(func() error { - <-running - require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed") - close(blocker) - return nil - }) - require.NoError(t, eg.Wait()) - st := testWaitForEvacuationCompleted(t, e) - require.Equal(t, uint64(3), st.ObjectsEvacuated()) - require.Equal(t, st.ErrorMessage(), "") -} - -func TestEvacuateObjectsAsync(t *testing.T) { - e, ids, _ := newEngineEvacuate(t, 2, 3) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - - blocker := make(chan any) - running := make(chan any) - - var prm EvacuateShardPrm - prm.ShardID = ids[1:2] - prm.Scope = EvacuateScopeObjects - prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { - select { - case <-running: - default: - close(running) - } - <-blocker - return true, nil - } - - st, err := e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get init state failed") - require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state") - require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid init count") - require.Nil(t, st.StartedAt(), "invalid init started at") - require.Nil(t, st.FinishedAt(), "invalid init finished at") - require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") - require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - - eg, egCtx := errgroup.WithContext(context.Background()) - eg.Go(func() error { - require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") - st := testWaitForEvacuationCompleted(t, e) - require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") - return nil - }) - - <-running - - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get running state failed") - require.Equal(t, EvacuateProcessStateRunning, st.ProcessingStatus(), "invalid running state") - require.Equal(t, uint64(0), 
st.ObjectsEvacuated(), "invalid running count") - require.NotNil(t, st.StartedAt(), "invalid running started at") - require.Nil(t, st.FinishedAt(), "invalid init finished at") - expectedShardIDs := make([]string, 0, 2) - for _, id := range ids[1:2] { - expectedShardIDs = append(expectedShardIDs, id.String()) - } - require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid running shard ids") - require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - - require.Error(t, e.ResetEvacuationStatus(context.Background())) - - close(blocker) - - st = testWaitForEvacuationCompleted(t, e) - require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") - require.NotNil(t, st.StartedAt(), "invalid final started at") - require.NotNil(t, st.FinishedAt(), "invalid final finished at") - require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids") - require.Equal(t, "", st.ErrorMessage(), "invalid final error message") - - require.NoError(t, eg.Wait()) - - require.NoError(t, e.ResetEvacuationStatus(context.Background())) - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get state after reset failed") - require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid state after reset") - require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid count after reset") - require.Nil(t, st.StartedAt(), "invalid started at after reset") - require.Nil(t, st.FinishedAt(), "invalid finished at after reset") - require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid shard ids after reset") - require.Equal(t, "", st.ErrorMessage(), "invalid error message after reset") -} - -func TestEvacuateTreesLocal(t *testing.T) { - e, ids, _ := newEngineEvacuate(t, 2, 3) - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - - var prm EvacuateShardPrm - prm.ShardID = ids[0:1] - prm.Scope = EvacuateScopeTrees - - expectedShardIDs := make([]string, 0, 1) - for _, id := range ids[0:1] { - expectedShardIDs = append(expectedShardIDs, id.String()) - } - - st, err := e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get init state failed") - require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state") - require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count") - require.Nil(t, st.StartedAt(), "invalid init started at") - require.Nil(t, st.FinishedAt(), "invalid init finished at") - require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") - require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - - require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") - - st = testWaitForEvacuationCompleted(t, e) - require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count") - require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count") - require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") - require.NotNil(t, st.StartedAt(), "invalid final started at") - require.NotNil(t, st.FinishedAt(), "invalid final finished at") - require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids") - require.Equal(t, "", st.ErrorMessage(), "invalid final error message") - - sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[0].String()]) - require.NoError(t, err, "list source trees failed") - require.Len(t, sourceTrees, 3) 
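// Each source tree must now also exist on the target shard with an identical
// operation log, read page by page via TreeGetOpLog below.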
-
-	for _, tr := range sourceTrees {
-		exists, err := e.shards[ids[1].String()].TreeExists(context.Background(), tr.CID, tr.TreeID)
-		require.NoError(t, err, "failed to check tree existence")
-		require.True(t, exists, "tree doesn't exist on target shard")
-
-		var height uint64
-		var sourceOps []pilorama.Move
-		for {
-			op, err := e.shards[ids[0].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
-			require.NoError(t, err)
-			if op.Time == 0 {
-				break
-			}
-			sourceOps = append(sourceOps, op)
-			height = op.Time + 1
-		}
-
-		height = 0
-		var targetOps []pilorama.Move
-		for {
-			op, err := e.shards[ids[1].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
-			require.NoError(t, err)
-			if op.Time == 0 {
-				break
-			}
-			targetOps = append(targetOps, op)
-			height = op.Time + 1
-		}
-
-		require.Equal(t, sourceOps, targetOps)
-	}
-}
-
-func TestEvacuateTreesRemote(t *testing.T) {
-	e, ids, _ := newEngineEvacuate(t, 2, 3)
-	defer func() {
-		require.NoError(t, e.Close(context.Background()))
-	}()
-
-	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
-
-	mutex := sync.Mutex{}
-	evacuatedTreeOps := make(map[string][]*pilorama.Move)
-	var prm EvacuateShardPrm
-	prm.ShardID = ids
-	prm.Scope = EvacuateScopeTrees
-	prm.TreeHandler = func(ctx context.Context, contID cid.ID, treeID string, f pilorama.Forest) (bool, string, error) {
-		key := contID.String() + treeID
-		var height uint64
-		for {
-			op, err := f.TreeGetOpLog(ctx, contID, treeID, height)
-			require.NoError(t, err)
-
-			if op.Time == 0 {
-				return true, "", nil
-			}
-			mutex.Lock()
-			evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
-			mutex.Unlock()
-			height = op.Time + 1
-		}
-	}
-
-	expectedShardIDs := make([]string, 0, len(ids))
-	for _, id := range ids {
-		expectedShardIDs = append(expectedShardIDs, id.String())
-	}
-
-	st, err := e.GetEvacuationState(context.Background())
-	require.NoError(t, err, "get init state failed")
-	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
-	require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
-	require.Nil(t, st.StartedAt(), "invalid init started at")
-	require.Nil(t, st.FinishedAt(), "invalid init finished at")
-	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
-	require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
-
-	require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
-	st = testWaitForEvacuationCompleted(t, e)
-
-	require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
-	require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
-	require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
-	require.NotNil(t, st.StartedAt(), "invalid final started at")
-	require.NotNil(t, st.FinishedAt(), "invalid final finished at")
-	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
-	require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
-
-	expectedTreeOps := make(map[string][]*pilorama.Move)
-	for i := range len(e.shards) {
-		sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
-		require.NoError(t, err, "list source trees failed")
-		require.Len(t, sourceTrees, 3)
-
-		for _, tr := range sourceTrees {
-			key := tr.CID.String() + tr.TreeID
-			var height uint64
-			for {
-				op, err := 
e.shards[ids[i].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
-				require.NoError(t, err)
-
-				if op.Time == 0 {
-					break
-				}
-				expectedTreeOps[key] = append(expectedTreeOps[key], &op)
-				height = op.Time + 1
-			}
-		}
-	}
-
-	require.Equal(t, expectedTreeOps, evacuatedTreeOps)
-}
-
-func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
-	e, ids, _ := newEngineEvacuate(t, 2, 0)
-	defer func() {
-		require.NoError(t, e.Close(context.Background()))
-	}()
-
-	// Create a container with policy REP 2 REP 1 (not rep-one-only)
-	cnr1 := container.Container{}
-	p1 := netmap.PlacementPolicy{}
-	p1.SetContainerBackupFactor(1)
-	x1 := netmap.ReplicaDescriptor{}
-	x1.SetNumberOfObjects(2)
-	p1.AddReplicas(x1)
-	x1 = netmap.ReplicaDescriptor{}
-	x1.SetNumberOfObjects(1)
-	p1.AddReplicas(x1)
-	cnr1.SetPlacementPolicy(p1)
-	cnr1.SetAttribute("cnr", "cnr1")
-
-	var idCnr1 cid.ID
-	container.CalculateID(&idCnr1, cnr1)
-
-	cnrmap := make(map[cid.ID]*container.Container)
-	var cids []cid.ID
-	cnrmap[idCnr1] = &cnr1
-	cids = append(cids, idCnr1)
-
-	// Create a container with policy REP 1 REP 1 (rep-one-only)
-	cnr2 := container.Container{}
-	p2 := netmap.PlacementPolicy{}
-	p2.SetContainerBackupFactor(1)
-	x2 := netmap.ReplicaDescriptor{}
-	x2.SetNumberOfObjects(1)
-	p2.AddReplicas(x2)
-	x2 = netmap.ReplicaDescriptor{}
-	x2.SetNumberOfObjects(1)
-	p2.AddReplicas(x2)
-	cnr2.SetPlacementPolicy(p2)
-	cnr2.SetAttribute("cnr", "cnr2")
-
-	var idCnr2 cid.ID
-	container.CalculateID(&idCnr2, cnr2)
-	cnrmap[idCnr2] = &cnr2
-	cids = append(cids, idCnr2)
-
-	// Create a container to simulate removal: it is not added to the
-	// container source, so lookups for it return "not found"
-	cnr3 := container.Container{}
-	p3 := netmap.PlacementPolicy{}
-	p3.SetContainerBackupFactor(1)
-	x3 := netmap.ReplicaDescriptor{}
-	x3.SetNumberOfObjects(1)
-	p3.AddReplicas(x3)
-	cnr3.SetPlacementPolicy(p3)
-	cnr3.SetAttribute("cnr", "cnr3")
-
-	var idCnr3 cid.ID
-	container.CalculateID(&idCnr3, cnr3)
-	cids = append(cids, idCnr3)
-
-	e.SetContainerSource(&containerStorage{cntmap: cnrmap})
-
-	for _, sh := range ids {
-		for j := range 3 {
-			for range 4 {
-				obj := testutil.GenerateObjectWithCID(cids[j])
-				var putPrm shard.PutPrm
-				putPrm.SetObject(obj)
-				_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
-				require.NoError(t, err)
-			}
-		}
-	}
-
-	var prm EvacuateShardPrm
-	prm.ShardID = ids[0:1]
-	prm.Scope = EvacuateScopeObjects
-	prm.RepOneOnly = true
-
-	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
-	require.NoError(t, e.Evacuate(context.Background(), prm))
-	st := testWaitForEvacuationCompleted(t, e)
-	require.Equal(t, "", st.ErrorMessage())
-	require.Equal(t, uint64(4), st.ObjectsEvacuated())
-	require.Equal(t, uint64(8), st.ObjectsSkipped())
-	require.Equal(t, uint64(0), st.ObjectsFailed())
-}
-
-func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
-	t.Skip()
-	e, ids, _ := newEngineEvacuate(t, 2, 0)
-	defer func() {
-		require.NoError(t, e.Close(context.Background()))
-	}()
-
-	cnrmap := make(map[cid.ID]*container.Container)
-	var cids []cid.ID
-	// Create containers with policy REP 2
-	for i := range 10_000 {
-		cnr1 := container.Container{}
-		p1 := netmap.PlacementPolicy{}
-		p1.SetContainerBackupFactor(1)
-		x1 := netmap.ReplicaDescriptor{}
-		x1.SetNumberOfObjects(2)
-		p1.AddReplicas(x1)
-		cnr1.SetPlacementPolicy(p1)
-		cnr1.SetAttribute("i", strconv.Itoa(i))
-
-		var idCnr1 cid.ID
-		container.CalculateID(&idCnr1, cnr1)
-
-		cnrmap[idCnr1] = &cnr1
-		cids = append(cids, idCnr1)
-	}
-
-	e.SetContainerSource(&containerStorage{
-		cntmap:  cnrmap,
-		latency: time.Millisecond * 100,
-	})
-
-	for _, cnt := range cids {
-		for range 1 {
-			obj := testutil.GenerateObjectWithCID(cnt)
-			var putPrm shard.PutPrm
-			putPrm.SetObject(obj)
-			_, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
-			require.NoError(t, err)
-		}
-	}
-
-	var prm EvacuateShardPrm
-	prm.ShardID = ids[0:1]
-	prm.Scope = EvacuateScopeObjects
-	prm.RepOneOnly = true
-	prm.ContainerWorkerCount = 10
-
-	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
-	start := time.Now()
-	err := e.Evacuate(context.Background(), prm)
-	testWaitForEvacuationCompleted(t, e)
-	t.Logf("evacuate took %v\n", time.Since(start))
-	require.NoError(t, err)
-}
-
-func (l *evacuationLimiter) waitForCompleted() {
-	l.guard.Lock()
-	defer l.guard.Unlock()
-
-	for l.state.processState != EvacuateProcessStateCompleted {
-		l.statusCond.Wait()
-	}
-}
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
deleted file mode 100644
index 7dac9eb97..000000000
--- a/pkg/local_object_storage/engine/exists.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package engine
-
-import (
-	"context"
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	"go.uber.org/zap"
-)
-
-// exists returns true in the first value if the object exists.
-// The second return value reports whether the parent object is locked.
-func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool, bool, error) {
-	alreadyRemoved := false
-	exists := false
-	locked := false
-
-	if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
-		res, err := sh.Exists(ctx, shPrm)
-		if err != nil {
-			if client.IsErrObjectAlreadyRemoved(err) {
-				alreadyRemoved = true
-
-				return true
-			}
-
-			var siErr *objectSDK.SplitInfoError
-			if errors.As(err, &siErr) {
-				return true
-			}
-
-			if shard.IsErrObjectExpired(err) {
-				return true
-			}
-
-			if !client.IsErrObjectNotFound(err) {
-				e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
-			}
-			return false
-		}
-
-		if !exists {
-			exists = res.Exists()
-		}
-		if !locked {
-			locked = res.Locked()
-		}
-
-		return false
-	}); err != nil {
-		return false, false, err
-	}
-
-	if alreadyRemoved {
-		return false, false, new(apistatus.ObjectAlreadyRemoved)
-	}
-
-	return exists, locked, nil
-}
diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go
deleted file mode 100644
index 9b3c0833f..000000000
--- a/pkg/local_object_storage/engine/exists_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package engine
-
-import (
-	"context"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	"github.com/stretchr/testify/require"
-)
-
-func BenchmarkExists(b *testing.B) {
-	b.Run("2 shards", func(b *testing.B) {
-		benchmarkExists(b, 2)
-	})
-	b.Run("4 shards", func(b *testing.B) {
-		benchmarkExists(b, 4)
-	})
-	b.Run("8 shards", func(b *testing.B) {
-		benchmarkExists(b, 8)
-	})
-}
-
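// benchmarkExists (below) pre-fills the engine with 100 objects and then
// repeatedly checks an address that was never stored, so every shard is
// consulted on each iteration before "not found" is returned.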
-func benchmarkExists(b *testing.B, shardNum int) { - e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine - defer func() { require.NoError(b, e.Close(context.Background())) }() - - addr := oidtest.Address() - for range 100 { - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - err := Put(context.Background(), e, obj, false) - if err != nil { - b.Fatal(err) - } - } - - b.ReportAllocs() - b.ResetTimer() - for range b.N { - var shPrm shard.ExistsPrm - shPrm.Address = addr - shPrm.ECParentAddress = oid.Address{} - ok, _, err := e.exists(context.Background(), shPrm) - if err != nil || ok { - b.Fatalf("%t %v", ok, err) - } - } -} diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go deleted file mode 100644 index 0694c53f3..000000000 --- a/pkg/local_object_storage/engine/get.go +++ /dev/null @@ -1,223 +0,0 @@ -package engine - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// GetPrm groups the parameters of Get operation. -type GetPrm struct { - addr oid.Address -} - -// GetRes groups the resulting values of Get operation. -type GetRes struct { - obj *objectSDK.Object -} - -// WithAddress is a Get option to set the address of the requested object. -// -// Option is required. -func (p *GetPrm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// Object returns the requested object. -func (r GetRes) Object() *objectSDK.Object { - return r.obj -} - -// Get reads an object from local storage. -// -// Returns any error encountered that -// did not allow to completely read the object part. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in local storage. -// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed. -// -// Returns an error if executions are blocked (see BlockExecution). 
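//
// A minimal usage sketch (hypothetical names: assumes an initialized engine
// `e`, a context `ctx`, and a valid object address `addr`):
//
//	var prm GetPrm
//	prm.WithAddress(addr)
//	res, err := e.Get(ctx, prm)
//	if err != nil {
//		// handle apistatus.ObjectNotFound, apistatus.ObjectAlreadyRemoved, etc.
//	} else {
//		obj := res.Object()
//		_ = obj
//	}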
-func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - )) - defer span.End() - defer elapsed("Get", e.metrics.AddMethodDuration)() - - err = e.execIfNotBlocked(func() error { - res, err = e.get(ctx, prm) - return err - }) - - return -} - -func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { - errNotFound := new(apistatus.ObjectNotFound) - - var shPrm shard.GetPrm - shPrm.SetAddress(prm.addr) - - it := &getShardIterator{ - OutError: errNotFound, - ShardPrm: shPrm, - Address: prm.addr, - Engine: e, - } - - if err := it.tryGetWithMeta(ctx); err != nil { - return GetRes{}, err - } - - if it.SplitInfo != nil { - return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) - } - - if it.ECInfo != nil { - return GetRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo)) - } - - if it.ObjectExpired { - return GetRes{}, errNotFound - } - - if it.Object == nil { - if !it.HasDegraded && it.ShardWithMeta.Shard == nil || !client.IsErrObjectNotFound(it.OutError) { - return GetRes{}, it.OutError - } - - if err := it.tryGetFromBlobstore(ctx); err != nil { - return GetRes{}, err - } - - if it.Object == nil { - return GetRes{}, it.OutError - } - if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, - zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.Error(it.MetaError), - zap.Stringer("address", prm.addr)) - } - } - - return GetRes{ - obj: it.Object, - }, nil -} - -type getShardIterator struct { - Object *objectSDK.Object - SplitInfo *objectSDK.SplitInfo - ECInfo *objectSDK.ECInfo - OutError error - ShardWithMeta hashedShard - MetaError error - HasDegraded bool - ObjectExpired bool - - ShardPrm shard.GetPrm - Address oid.Address - Engine *StorageEngine - - splitInfoErr *objectSDK.SplitInfoError - ecInfoErr *objectSDK.ECInfoError -} - -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { - noMeta := sh.GetMode().NoMetabase() - i.ShardPrm.SetIgnoreMeta(noMeta) - - i.HasDegraded = i.HasDegraded || noMeta - - res, err := sh.Get(ctx, i.ShardPrm) - if err == nil { - i.Object = res.Object() - return true - } - - if res.HasMeta() { - i.ShardWithMeta = sh - i.MetaError = err - } - switch { - case client.IsErrObjectNotFound(err): - return false // ignore, go to next shard - case errors.As(err, &i.splitInfoErr): - if i.SplitInfo == nil { - i.SplitInfo = objectSDK.NewSplitInfo() - } - - util.MergeSplitInfo(i.splitInfoErr.SplitInfo(), i.SplitInfo) - - _, withLink := i.SplitInfo.Link() - _, withLast := i.SplitInfo.LastPart() - - // stop iterating over shards if SplitInfo structure is complete - return withLink && withLast - case errors.As(err, &i.ecInfoErr): - if i.ECInfo == nil { - i.ECInfo = objectSDK.NewECInfo() - } - - util.MergeECInfo(i.ecInfoErr.ECInfo(), i.ECInfo) - // stop iterating over shards if ECInfo structure is complete - return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total) - case client.IsErrObjectAlreadyRemoved(err): - i.OutError = err - return true // stop, return it back - case shard.IsErrObjectExpired(err): - // object is found but should not be returned - i.ObjectExpired = true - return true - default: - i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, 
zap.Stringer("address", i.Address)) - return false - } - }) -} - -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { - // If the object is not found but is present in metabase, - // try to fetch it from blobstor directly. If it is found in any - // blobstor, increase the error counter for the shard which contains the meta. - i.ShardPrm.SetIgnoreMeta(true) - - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { - if sh.GetMode().NoMetabase() { - // Already visited. - return false - } - - res, err := sh.Get(ctx, i.ShardPrm) - i.Object = res.Object() - return err == nil - }) -} - -// Get reads object from local storage by provided address. -func Get(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) { - var getPrm GetPrm - getPrm.WithAddress(addr) - - res, err := storage.Get(ctx, getPrm) - if err != nil { - return nil, err - } - - return res.Object(), nil -} diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go deleted file mode 100644 index d436dd411..000000000 --- a/pkg/local_object_storage/engine/head.go +++ /dev/null @@ -1,153 +0,0 @@ -package engine - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -// HeadPrm groups the parameters of Head operation. -type HeadPrm struct { - addr oid.Address - raw bool -} - -// HeadRes groups the resulting values of Head operation. -type HeadRes struct { - head *objectSDK.Object -} - -// WithAddress is a Head option to set the address of the requested object. -// -// Option is required. -func (p *HeadPrm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// WithRaw is a Head option to set raw flag value. If flag is unset, then Head -// returns the header of the virtual object, otherwise it returns SplitInfo of the virtual -// object. -func (p *HeadPrm) WithRaw(raw bool) { - p.raw = raw -} - -// Header returns the requested object header. -// -// Instance has empty payload. -func (r HeadRes) Header() *objectSDK.Object { - return r.head -} - -// Head reads object header from local storage. -// -// Returns any error encountered that -// did not allow to completely read the object header. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in local storage. -// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object was inhumed. -// -// Returns an error if executions are blocked (see BlockExecution). 
-func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err error) { - err = e.execIfNotBlocked(func() error { - res, err = e.head(ctx, prm) - return err - }) - - return -} - -func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head") - defer span.End() - defer elapsed("Head", e.metrics.AddMethodDuration)() - - var ( - head *objectSDK.Object - siErr *objectSDK.SplitInfoError - outSI *objectSDK.SplitInfo - eiErr *objectSDK.ECInfoError - outEI *objectSDK.ECInfo - outError error = new(apistatus.ObjectNotFound) - shPrm shard.HeadPrm - ) - shPrm.SetAddress(prm.addr) - shPrm.SetRaw(prm.raw) - - if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { - shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold - res, err := sh.Head(ctx, shPrm) - if err != nil { - switch { - case client.IsErrObjectNotFound(err): - return false // ignore, go to next shard - case errors.As(err, &siErr): - if outSI == nil { - outSI = objectSDK.NewSplitInfo() - } - util.MergeSplitInfo(siErr.SplitInfo(), outSI) - _, withLink := outSI.Link() - _, withLast := outSI.LastPart() - // stop iterating over shards if SplitInfo structure is complete - if withLink && withLast { - return true - } - return false - case errors.As(err, &eiErr): - if outEI == nil { - outEI = objectSDK.NewECInfo() - } - util.MergeECInfo(eiErr.ECInfo(), outEI) - // stop iterating over shards if ECInfo structure is complete - return len(outEI.Chunks) == int(outEI.Chunks[0].Total) - case client.IsErrObjectAlreadyRemoved(err): - outError = err - return true // stop, return it back - case shard.IsErrObjectExpired(err): - // object is found but should not - // be returned - outError = new(apistatus.ObjectNotFound) - return true - default: - e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) - return false - } - } - head = res.Object() - return true - }); err != nil { - return HeadRes{}, err - } - - if head != nil { - return HeadRes{head: head}, nil - } - if outSI != nil { - return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI)) - } - if outEI != nil { - return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI)) - } - return HeadRes{}, outError -} - -// Head reads object header from local storage by provided address. 
-func Head(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) { - var headPrm HeadPrm - headPrm.WithAddress(addr) - - res, err := storage.Head(ctx, headPrm) - if err != nil { - return nil, err - } - - return res.Header(), nil -} diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go deleted file mode 100644 index f9db81f16..000000000 --- a/pkg/local_object_storage/engine/head_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package engine - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestHeadRaw(t *testing.T) { - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "foo", "bar") - - var parentAddr oid.Address - parentAddr.SetContainer(cnr) - - idParent, _ := parent.ID() - parentAddr.SetObject(idParent) - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - child.SetParentID(idParent) - child.SetSplitID(splitID) - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(idParent) - - idChild, _ := child.ID() - link.SetChildren(idChild) - link.SetSplitID(splitID) - - t.Run("virtual object split in different shards", func(t *testing.T) { - te := testNewEngine(t).setShardsNum(t, 2).prepare(t) - e := te.engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - s1, s2 := te.shards[0], te.shards[1] - - var putPrmLeft shard.PutPrm - putPrmLeft.SetObject(child) - - var putPrmLink shard.PutPrm - putPrmLink.SetObject(link) - - // put most left object in one shard - _, err := s1.Put(context.Background(), putPrmLeft) - require.NoError(t, err) - - // put link object in another shard - _, err = s2.Put(context.Background(), putPrmLink) - require.NoError(t, err) - - // head with raw flag should return SplitInfoError - var headPrm HeadPrm - headPrm.WithAddress(parentAddr) - headPrm.WithRaw(true) - - _, err = e.Head(context.Background(), headPrm) - require.Error(t, err) - - var si *objectSDK.SplitInfoError - require.ErrorAs(t, err, &si) - - // SplitInfoError should contain info from both shards - require.Equal(t, splitID, si.SplitInfo().SplitID()) - - id1, _ := child.ID() - id2, _ := si.SplitInfo().LastPart() - require.Equal(t, id1, id2) - - id1, _ = link.ID() - id2, _ = si.SplitInfo().Link() - require.Equal(t, id1, id2) - }) -} diff --git a/pkg/local_object_storage/engine/info.go b/pkg/local_object_storage/engine/info.go deleted file mode 100644 index 41b75af60..000000000 --- a/pkg/local_object_storage/engine/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package engine - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" -) - -// Info groups the information about StorageEngine. -type Info struct { - Shards []shard.Info -} - -// DumpInfo returns information about the StorageEngine. 
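// It takes the engine mutex in read mode, so it is safe to call concurrently
// with other engine operations.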
-func (e *StorageEngine) DumpInfo() (i Info) {
-	e.mtx.RLock()
-	defer e.mtx.RUnlock()
-
-	i.Shards = make([]shard.Info, 0, len(e.shards))
-
-	for _, sh := range e.shards {
-		info := sh.DumpInfo()
-		info.ErrorCount = sh.errorCount.Load()
-		i.Shards = append(i.Shards, info)
-	}
-
-	return
-}
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
deleted file mode 100644
index e5f7072e2..000000000
--- a/pkg/local_object_storage/engine/inhume.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package engine
-
-import (
-	"context"
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-	"go.uber.org/zap"
-)
-
-// InhumePrm encapsulates parameters for inhume operation.
-type InhumePrm struct {
-	tombstone *oid.Address
-	addrs     []oid.Address
-
-	forceRemoval bool
-}
-
-// WithTarget sets a list of objects that should be inhumed and tombstone address
-// as the reason for inhume operation.
-//
-// tombstone should not be nil, addrs should not be empty.
-// Should not be called along with MarkAsGarbage.
-func (p *InhumePrm) WithTarget(tombstone oid.Address, addrs ...oid.Address) {
-	p.addrs = addrs
-	p.tombstone = &tombstone
-}
-
-// MarkAsGarbage marks an object to be physically removed from local storage.
-//
-// Should not be called along with WithTarget.
-func (p *InhumePrm) MarkAsGarbage(addrs ...oid.Address) {
-	p.addrs = addrs
-	p.tombstone = nil
-}
-
-// WithForceRemoval inhumes objects specified via MarkAsGarbage with GC mark
-// without any object restriction checks.
-func (p *InhumePrm) WithForceRemoval() {
-	p.forceRemoval = true
-	p.tombstone = nil
-}
-
-var errInhumeFailure = errors.New("inhume operation failed")
-
-// Inhume calls the metabase.Inhume method to mark an object as removed. It won't be
-// removed physically from the shard until the `Delete` operation.
-//
-// Allows inhuming non-locked objects only. Returns apistatus.ObjectLocked
-// if at least one object is locked.
-//
-// NOTE: Marks any object as removed (despite any prohibitions on operations
-// with that object) if the WithForceRemoval option has been provided.
-//
-// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
-	defer span.End()
-	defer elapsed("Inhume", e.metrics.AddMethodDuration)()
-
-	return e.execIfNotBlocked(func() error {
-		return e.inhume(ctx, prm)
-	})
-}
-
-func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
-	addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
-	if err != nil {
-		return err
-	}
-
-	var shPrm shard.InhumePrm
-	if prm.forceRemoval {
-		shPrm.ForceRemoval()
-	}
-
-	for shardID, addrs := range addrsPerShard {
-		if prm.tombstone != nil {
-			shPrm.SetTarget(*prm.tombstone, addrs...)
-		} else {
-			shPrm.MarkAsGarbage(addrs...)
-		}
-
-		sh, exists := e.shards[shardID]
-		if !exists {
-			e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
-				zap.Error(errors.New("this shard was expected to exist")),
-				zap.String("shard_id", shardID),
-			)
-			return errInhumeFailure
-		}
-
-		if _, err := sh.Inhume(ctx, shPrm); err != nil {
-			e.reportInhumeError(ctx, err, sh)
-			return err
-		}
-	}
-
-	return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
-}
-
-func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
-	if err == nil {
-		return
-	}
-
-	var errLocked *apistatus.ObjectLocked
-	switch {
-	case errors.As(err, &errLocked):
-	case errors.Is(err, shard.ErrLockObjectRemoval):
-	case errors.Is(err, shard.ErrReadOnlyMode):
-	case errors.Is(err, shard.ErrDegradedMode):
-	default:
-		e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
-	}
-}
-
-// inhumeNotFoundObjects removes objects which are not found on any shard.
-//
-// Besides an object not being found on any shard, it is also important to
-// remove it anyway in order to populate the metabase indexes, because they are
-// responsible for the correct object status: without the indexes the status
-// would be `object not found`, while with them it is `object is already
-// removed`.
-//
-// It is suggested to evenly remove those objects on each shard with the batch
-// size equal to 1 + floor(number of objects / number of shards).
-func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
-	if len(addrs) == 0 {
-		return nil
-	}
-
-	var shPrm shard.InhumePrm
-	if prm.forceRemoval {
-		shPrm.ForceRemoval()
-	}
-
-	numObjectsPerShard := 1 + len(addrs)/len(e.shards)
-
-	var inhumeErr error
-	itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
-		numObjects := min(numObjectsPerShard, len(addrs))
-
-		if numObjects == 0 {
-			return true
-		}
-
-		if prm.tombstone != nil {
-			shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
-		} else {
-			shPrm.MarkAsGarbage(addrs[:numObjects]...)
-		}
-		addrs = addrs[numObjects:]
-
-		_, inhumeErr = hs.Inhume(ctx, shPrm)
-		e.reportInhumeError(ctx, inhumeErr, hs)
-		return inhumeErr != nil
-	})
-	if inhumeErr != nil {
-		return inhumeErr
-	}
-	return itErr
-}
-
-// groupObjectsByShard groups objects based on the shard(s) they are stored on.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-//
-// Returns two sets of objects: found objects, grouped per shard, and
-// not-found objects, i.e., objects which were not found on any shard. The
-// latter can happen if a node is a container node but doesn't participate
-// in a replica group of the object.
-func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
-	groups = make(map[string][]oid.Address)
-
-	var ids []string
-	for _, addr := range addrs {
-		ids, err = e.findShards(ctx, addr, checkLocked)
-		if err != nil {
-			return
-		}
-
-		if len(ids) == 0 {
-			notFoundObjects = append(notFoundObjects, addr)
-			continue
-		}
-
-		for _, id := range ids {
-			groups[id] = append(groups[id], addr)
-		}
-	}
-
-	return
-}
-
-// findShards determines the shard(s) where the object is stored.
-//
-// If the object is a root object, multiple shards will be returned.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
-	var (
-		ids    []string
-		retErr error
-
-		prm shard.ExistsPrm
-
-		siErr *objectSDK.SplitInfoError
-		ecErr *objectSDK.ECInfoError
-
-		isRootObject bool
-		objectExists bool
-	)
-
-	if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
-		objectExists = false
-
-		prm.Address = addr
-		switch res, err := sh.Exists(ctx, prm); {
-		case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
-			// NOTE(@a-savchuk): there were some considerations that we can stop
-			// immediately if the object is already removed or expired. However,
-			// the previous method behavior was:
-			// - keep iterating if it's a root object and already removed,
-			// - stop iterating if it's not a root object and removed.
-			//
-			// Since my task was only improving method speed, let's keep the
-			// previous method behavior. Continue if it's a root object.
-			return !isRootObject
-		case errors.As(err, &siErr) || errors.As(err, &ecErr):
-			isRootObject = true
-			objectExists = true
-		case err != nil:
-			e.reportShardError(
-				ctx, sh, "couldn't check for presence in shard",
-				err, zap.Stringer("address", addr),
-			)
-		case res.Exists():
-			objectExists = true
-		default:
-		}
-
-		if checkLocked {
-			if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
-				e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
-					zap.Error(err),
-					zap.Stringer("address", addr),
-				)
-			} else if isLocked {
-				retErr = new(apistatus.ObjectLocked)
-				return true
-			}
-		}
-
-		// This exit point must come after checking if the object is locked,
-		// since the locked index may be populated even if the object doesn't
-		// exist.
-		if !objectExists {
-			return
-		}
-
-		ids = append(ids, sh.ID().String())
-
-		// Continue if it's a root object.
-		return !isRootObject
-	}); err != nil {
-		return nil, err
-	}
-
-	if retErr != nil {
-		return nil, retErr
-	}
-	return ids, nil
-}
-
-// IsLocked checks whether an object is locked according to StorageEngine's state.
-func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
-		trace.WithAttributes(
-			attribute.String("address", addr.EncodeToString()),
-		))
-	defer span.End()
-
-	var locked bool
-	var err error
-	var outErr error
-
-	if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
-		locked, err = h.IsLocked(ctx, addr)
-		if err != nil {
-			e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
-			outErr = err
-			return false
-		}
-
-		return locked
-	}); err != nil {
-		return false, err
-	}
-
-	if locked {
-		return locked, nil
-	}
-
-	return locked, outErr
-}
-
-// GetLocks returns lock IDs if the object is locked according to StorageEngine's state.
-func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks", - trace.WithAttributes( - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - var allLocks []oid.ID - var outErr error - - if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { - locks, err := h.GetLocks(ctx, addr) - if err != nil { - e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) - outErr = err - } - allLocks = append(allLocks, locks...) - return false - }); err != nil { - return nil, err - } - if len(allLocks) > 0 { - return allLocks, nil - } - return allLocks, outErr -} - -func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - sh.HandleExpiredTombstones(ctx, addrs) - - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) - return true - default: - return false - } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) - } -} - -func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - sh.HandleExpiredLocks(ctx, epoch, lockers) - - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) - return true - default: - return false - } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) - } -} - -func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - sh.HandleDeletedLocks(ctx, lockers) - - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) - return true - default: - return false - } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) - } -} - -func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { - if len(ids) == 0 { - return - } - idMap, err := e.selectNonExistentIDs(ctx, ids) - if err != nil { - return - } - if len(idMap) == 0 { - return - } - var failed bool - var prm shard.ContainerSizePrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) - failed = true - return true - default: - } - - var drop []cid.ID - for id := range idMap { - prm.SetContainerID(id) - s, err := sh.ContainerSize(ctx, prm) - if err != nil { - e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) - failed = true - return true - } - if s.Size() > 0 { - drop = append(drop, id) - } - } - for _, id := range drop { - delete(idMap, id) - } - - return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } - if failed || len(idMap) == 0 { - return - } - - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) - failed = true - 
return true - default: - } - - for id := range idMap { - if err := sh.DeleteContainerSize(ctx, id); err != nil { - e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) - failed = true - return true - } - } - - return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } - if failed { - return - } - for id := range idMap { - e.metrics.DeleteContainerSize(id.EncodeToString()) - } -} - -func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []cid.ID) { - if len(ids) == 0 { - return - } - idMap, err := e.selectNonExistentIDs(ctx, ids) - if err != nil { - return - } - if len(idMap) == 0 { - return - } - var failed bool - var prm shard.ContainerCountPrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) - failed = true - return true - default: - } - - var drop []cid.ID - for id := range idMap { - prm.ContainerID = id - s, err := sh.ContainerCount(ctx, prm) - if err != nil { - e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err)) - failed = true - return true - } - if s.User > 0 || s.Logic > 0 || s.Phy > 0 { - drop = append(drop, id) - } - } - for _, id := range drop { - delete(idMap, id) - } - - return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - } - if failed || len(idMap) == 0 { - return - } - - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { - select { - case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) - failed = true - return true - default: - } - - for id := range idMap { - if err := sh.DeleteContainerCount(ctx, id); err != nil { - e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) - failed = true - return true - } - } - - return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - } - if failed { - return - } - for id := range idMap { - e.metrics.DeleteContainerCount(id.EncodeToString()) - } -} - -func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) (map[cid.ID]struct{}, error) { - cs := e.containerSource.Load() - - idMap := make(map[cid.ID]struct{}) - for _, id := range ids { - isAvailable, err := cs.IsContainerAvailable(ctx, id) - if err != nil { - e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err)) - return nil, err - } - if isAvailable { - continue - } - idMap[id] = struct{}{} - } - return idMap, nil -} diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go deleted file mode 100644 index 0e268cd23..000000000 --- a/pkg/local_object_storage/engine/inhume_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - objectV2 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestStorageEngine_Inhume(t *testing.T) { - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - fs := objectSDK.SearchFilters{} - fs.AddRootFilter() - - tombstoneID := object.AddressOf(testutil.GenerateObjectWithCID(cnr)) - parent := testutil.GenerateObjectWithCID(cnr) - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - child.SetSplitID(splitID) - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(idParent) - idChild, _ := child.ID() - link.SetChildren(idChild) - link.SetSplitID(splitID) - - t.Run("delete small object", func(t *testing.T) { - t.Parallel() - e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - err := Put(context.Background(), e, parent, false) - require.NoError(t, err) - - var inhumePrm InhumePrm - inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - - err = e.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - addrs, err := Select(context.Background(), e, cnr, false, fs) - require.NoError(t, err) - require.Empty(t, addrs) - }) - - t.Run("delete big object", func(t *testing.T) { - t.Parallel() - - te := testNewEngine(t).setShardsNum(t, 2).prepare(t) - e := te.engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - s1, s2 := te.shards[0], te.shards[1] - - var putChild shard.PutPrm - putChild.SetObject(child) - _, err := s1.Put(context.Background(), putChild) - require.NoError(t, err) - - var putLink shard.PutPrm - putLink.SetObject(link) - _, err = s2.Put(context.Background(), putLink) - require.NoError(t, err) - - var inhumePrm InhumePrm - inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - - err = e.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - addrs, err := Select(context.Background(), e, cnr, false, fs) - require.NoError(t, err) - require.Empty(t, addrs) - }) -} - -func TestStorageEngine_ECInhume(t *testing.T) { - parentObjectAddress := oidtest.Address() - containerID := parentObjectAddress.Container() - - chunkObject0 := testutil.GenerateObjectWithCID(containerID) - chunkObject0.SetECHeader(objectSDK.NewECHeader( - objectSDK.ECParentInfo{ - ID: parentObjectAddress.Object(), - }, 0, 4, []byte{}, 0)) - - chunkObject1 := testutil.GenerateObjectWithCID(containerID) - chunkObject1.SetECHeader(objectSDK.NewECHeader( - objectSDK.ECParentInfo{ - ID: parentObjectAddress.Object(), - }, 1, 4, []byte{}, 0)) - - tombstone := objectSDK.NewTombstone() - tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()}) - payload, err := tombstone.Marshal() - require.NoError(t, err) - tombstoneObject := testutil.GenerateObjectWithCID(containerID) - tombstoneObject.SetType(objectSDK.TypeTombstone) - tombstoneObject.SetPayload(payload) - tombstoneObjectAddress := object.AddressOf(tombstoneObject) 
- - e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - require.NoError(t, Put(context.Background(), e, chunkObject0, false)) - - require.NoError(t, Put(context.Background(), e, tombstoneObject, false)) - - var inhumePrm InhumePrm - inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) - err = e.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - var alreadyRemoved *apistatus.ObjectAlreadyRemoved - - require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved) - - require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved) -} - -func TestInhumeExpiredRegularObject(t *testing.T) { - t.Parallel() - - const currEpoch = 42 - const objectExpiresAfter = currEpoch - 1 - - engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { - return []shard.Option{ - shard.WithDisabledGC(), - shard.WithMetaBaseOptions(append( - testGetDefaultMetabaseOptions(t), - meta.WithEpochState(epochState{currEpoch}), - )...), - } - }).prepare(t).engine - - cnr := cidtest.ID() - - generateAndPutObject := func() *objectSDK.Object { - obj := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) - - var putPrm PutPrm - putPrm.Object = obj - require.NoError(t, engine.Put(context.Background(), putPrm)) - return obj - } - - t.Run("inhume with tombstone", func(t *testing.T) { - obj := generateAndPutObject() - ts := oidtest.Address() - ts.SetContainer(cnr) - - var prm InhumePrm - prm.WithTarget(ts, object.AddressOf(obj)) - err := engine.Inhume(context.Background(), prm) - require.NoError(t, err) - }) - - t.Run("inhume without tombstone", func(t *testing.T) { - obj := generateAndPutObject() - - var prm InhumePrm - prm.MarkAsGarbage(object.AddressOf(obj)) - err := engine.Inhume(context.Background(), prm) - require.NoError(t, err) - }) -} - -func BenchmarkInhumeMultipart(b *testing.B) { - // The benchmark result insignificantly depends on the number of shards, - // so do not use it as a benchmark parameter, just set it big enough. - numShards := 100 - - for numObjects := 1; numObjects <= 10000; numObjects *= 10 { - b.Run( - fmt.Sprintf("objects=%d", numObjects), - func(b *testing.B) { - benchmarkInhumeMultipart(b, numShards, numObjects) - }, - ) - } -} - -func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { - b.StopTimer() - - engine := testNewEngine(b). - setShardsNum(b, numShards).prepare(b).engine - defer func() { require.NoError(b, engine.Close(context.Background())) }() - - cnt := cidtest.ID() - eg := errgroup.Group{} - - for range b.N { - addrs := make([]oid.Address, numObjects) - - for i := range numObjects { - prm := PutPrm{} - - prm.Object = objecttest.Object().Parent() - prm.Object.SetContainerID(cnt) - prm.Object.SetType(objectSDK.TypeRegular) - - addrs[i] = object.AddressOf(prm.Object) - - eg.Go(func() error { - return engine.Put(context.Background(), prm) - }) - } - require.NoError(b, eg.Wait()) - - ts := oidtest.Address() - ts.SetContainer(cnt) - - prm := InhumePrm{} - prm.WithTarget(ts, addrs...) 
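		// One tombstone address can target many object addresses at once;
		// the single Inhume call below therefore fans out across all shards
		// that hold any of the listed objects.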
- - b.StartTimer() - err := engine.Inhume(context.Background(), prm) - require.NoError(b, err) - b.StopTimer() - } -} - -func TestInhumeIfObjectDoesntExist(t *testing.T) { - const numShards = 4 - - engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine - t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) - - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, true, false) - }) - t.Run("force inhume", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, true) - }) - - t.Run("object is locked", func(t *testing.T) { - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, true, false) - }) - t.Run("force inhume", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, true) - }) - }) -} - -func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) - - err := testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testLockObject(e, object)) - - err := testInhumeObject(t, e, object, withTombstone, withForce) - if !withForce { - var errLocked *apistatus.ObjectLocked - require.ErrorAs(t, err, &errLocked) - return - } - require.NoError(t, err) - - err = testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testLockObject(e *StorageEngine, obj oid.Address) error { - return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) -} - -func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { - tombstone := oidtest.Address() - tombstone.SetContainer(obj.Container()) - - // Due to the tests design it is possible to set both the options, - // however removal with tombstone and force removal are exclusive. 
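	// Tombstone removal is the regular client path, while force removal
	// bypasses lock checks (see the locked-object cases elsewhere in this
	// file), so the two options are never combined.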
- require.False(t, withTombstone && withForce) - - var inhumePrm InhumePrm - if withTombstone { - inhumePrm.WithTarget(tombstone, obj) - } else { - inhumePrm.MarkAsGarbage(obj) - } - if withForce { - inhumePrm.WithForceRemoval() - } - return e.Inhume(context.Background(), inhumePrm) -} - -func testHeadObject(e *StorageEngine, obj oid.Address) error { - var headPrm HeadPrm - headPrm.WithAddress(obj) - - _, err := e.Head(context.Background(), headPrm) - return err -} diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go deleted file mode 100644 index 073248862..000000000 --- a/pkg/local_object_storage/engine/list.go +++ /dev/null @@ -1,185 +0,0 @@ -package engine - -import ( - "context" - "math/rand" - "sort" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" -) - -// ErrEndOfListing is returned from an object listing with cursor -// when the storage can't return any more objects after the provided -// cursor. Use nil cursor object to start listing again. -var ErrEndOfListing = shard.ErrEndOfListing - -// Cursor is a type for continuous object listing. Cursor contains shard IDs to read -// and shard cursors that contain state from previous read. -type Cursor struct { - current string - shardIDs map[string]bool - shardIDToCursor map[string]*shard.Cursor -} - -func (c *Cursor) getCurrentShardCursor() *shard.Cursor { - return c.shardIDToCursor[c.current] -} - -func (c *Cursor) setCurrentShardCursor(sc *shard.Cursor) { - c.shardIDToCursor[c.current] = sc -} - -func (c *Cursor) nextShard() bool { - var shardsToRead []string - for shardID, read := range c.shardIDs { - if !read { - shardsToRead = append(shardsToRead, shardID) - } - } - if len(shardsToRead) == 0 { - return false - } - c.current = shardsToRead[rand.Intn(len(shardsToRead))] - return true -} - -func (c *Cursor) setShardRead(shardID string) { - c.shardIDs[shardID] = true -} - -// ListWithCursorPrm contains parameters for ListWithCursor operation. -type ListWithCursorPrm struct { - count uint32 - cursor *Cursor -} - -// WithCount sets the maximum amount of addresses that ListWithCursor should return. -func (p *ListWithCursorPrm) WithCount(count uint32) { - p.count = count -} - -// WithCursor sets a cursor for ListWithCursor operation. For initial request -// ignore this param or use nil value. For consecutive requests, use value -// from ListWithCursorRes. -func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) { - p.cursor = cursor -} - -// ListWithCursorRes contains values returned from ListWithCursor operation. -type ListWithCursorRes struct { - addrList []objectcore.Info - cursor *Cursor -} - -// AddressList returns addresses selected by ListWithCursor operation. -func (l ListWithCursorRes) AddressList() []objectcore.Info { - return l.addrList -} - -// Cursor returns cursor for consecutive listing requests. -func (l ListWithCursorRes) Cursor() *Cursor { - return l.cursor -} - -// ListWithCursor lists physical objects available in the engine starting -// from the cursor. It includes regular, tombstone and storage group objects. -// Does not include inhumed objects. Use cursor value from the response -// for consecutive requests. -// -// If count param is big enough, then the method reads objects from different shards -// by portions. In this case shards are chosen randomly, if they're not read out yet. 
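//
// A minimal usage sketch (illustrative; error handling beyond the end-of-listing
// sentinel is elided, and process is a placeholder for caller logic):
//
//	var prm ListWithCursorPrm
//	prm.WithCount(100)
//	for {
//		res, err := e.ListWithCursor(ctx, prm)
//		if err != nil {
//			break // ErrEndOfListing once everything has been returned
//		}
//		process(res.AddressList())
//		prm.WithCursor(res.Cursor())
//	}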
-// -// Adding a shard between ListWithCursor does not invalidate the cursor but new shard -// won't be listed. -// Removing a shard between ListWithCursor leads to the undefined behavior -// (e.g. usage of the objects from the removed shard). -// -// Returns ErrEndOfListing if there are no more objects to return or count -// parameter set to zero. -func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor") - defer span.End() - defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)() - - result := make([]objectcore.Info, 0, prm.count) - - // Set initial cursors - cursor := prm.cursor - if cursor == nil { - shardIDs := getSortedShardIDs(e) - if len(shardIDs) == 0 { - return ListWithCursorRes{}, ErrEndOfListing - } - cursor = newCursor(shardIDs) - } - - const ( - splitShardCountLimit = 100 - shardsNum = 4 - ) - - batchSize := prm.count - if batchSize >= splitShardCountLimit { - batchSize /= shardsNum - } - - for cursor.nextShard() { - if len(result) >= int(prm.count) { - break - } - curr := cursor.current - - e.mtx.RLock() - shardInstance, ok := e.shards[curr] - e.mtx.RUnlock() - if !ok { - cursor.setShardRead(curr) - continue - } - - count := min(prm.count-uint32(len(result)), batchSize) - - var shardPrm shard.ListWithCursorPrm - shardPrm.WithCount(count) - shardPrm.WithCursor(cursor.getCurrentShardCursor()) - - res, err := shardInstance.ListWithCursor(ctx, shardPrm) - if err != nil { - cursor.setShardRead(curr) - continue - } - result = append(result, res.AddressList()...) - cursor.setCurrentShardCursor(res.Cursor()) - } - - if len(result) == 0 { - return ListWithCursorRes{}, ErrEndOfListing - } - - return ListWithCursorRes{ - addrList: result, - cursor: cursor, - }, nil -} - -func getSortedShardIDs(e *StorageEngine) []string { - e.mtx.RLock() - shardIDs := make([]string, 0, len(e.shards)) - for id := range e.shards { - shardIDs = append(shardIDs, id) - } - e.mtx.RUnlock() - sort.Strings(shardIDs) - return shardIDs -} - -func newCursor(shardIDs []string) *Cursor { - shardIDsMap := make(map[string]bool) - shardIDToCursor := make(map[string]*shard.Cursor) - for _, shardID := range shardIDs { - shardIDsMap[shardID] = false - } - return &Cursor{shardIDs: shardIDsMap, shardIDToCursor: shardIDToCursor} -} diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go deleted file mode 100644 index 6cfa546f8..000000000 --- a/pkg/local_object_storage/engine/list_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package engine - -import ( - "context" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestListWithCursor(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - shardNum int - objectNum int - 
batchSize uint32 - }{ - { - name: "one shard, few objects, small batch size", - shardNum: 1, - objectNum: 2, - batchSize: 1, - }, - { - name: "one shard, many objects, big batch size", - shardNum: 1, - objectNum: 53, - batchSize: 100, - }, - { - name: "many shards, many objects, small batch size", - shardNum: 6, - objectNum: 66, - batchSize: 1, - }, - { - name: "many shards, many objects, big batch size", - shardNum: 6, - objectNum: 99, - batchSize: 100, - }, - } - for i := range tests { - tt := tests[i] - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages( - newStorages(t, t.TempDir(), 1<<20))), - shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - ), - } - }).prepare(t).engine - defer func() { - require.NoError(t, e.Close(context.Background())) - }() - - expected := make([]object.Info, 0, tt.objectNum) - got := make([]object.Info, 0, tt.objectNum) - - for range tt.objectNum { - containerID := cidtest.ID() - obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) - err := e.Put(context.Background(), PutPrm{Object: obj}) - require.NoError(t, err) - expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)}) - } - - var prm ListWithCursorPrm - prm.count = tt.batchSize - for { - res, err := e.ListWithCursor(context.Background(), prm) - if err == ErrEndOfListing { - require.Empty(t, res.AddressList()) - break - } - require.NotEmpty(t, res.AddressList()) - got = append(got, res.AddressList()...) - prm.cursor = res.Cursor() - } - - require.ElementsMatch(t, expected, got) - }) - } -} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go deleted file mode 100644 index 3b0cf74f9..000000000 --- a/pkg/local_object_storage/engine/lock.go +++ /dev/null @@ -1,156 +0,0 @@ -package engine - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var errLockFailed = errors.New("lock operation failed") - -// Lock marks objects as locked with another object. All objects from the -// specified container. -// -// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject). -// -// Locked list should be unique. Panics if it is empty. 
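//
// A calling sketch (illustrative; cnr, lockID and ids are assumed to be
// prepared by the caller):
//
//	if err := e.Lock(ctx, cnr, lockID, ids); err != nil {
//		var irregular *apistatus.LockNonRegularObject
//		if errors.As(err, &irregular) {
//			// attempted to lock a tombstone, lock or other non-regular object
//		}
//	}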
-func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock", - trace.WithAttributes( - attribute.String("container_id", idCnr.EncodeToString()), - attribute.String("locker", locker.EncodeToString()), - attribute.Int("locked_count", len(locked)), - )) - defer span.End() - defer elapsed("Lock", e.metrics.AddMethodDuration)() - - return e.execIfNotBlocked(func() error { - return e.lock(ctx, idCnr, locker, locked) - }) -} - -func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { - for i := range locked { - st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) - if err != nil { - return err - } - switch st { - case 1: - return logicerr.Wrap(new(apistatus.LockNonRegularObject)) - case 0: - st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) - if err != nil { - return err - } - switch st { - case 1: - return logicerr.Wrap(new(apistatus.LockNonRegularObject)) - case 0: - return logicerr.Wrap(errLockFailed) - } - } - } - - return nil -} - -// Returns: -// - 0: fail -// - 1: locking irregular object -// - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { - // code is pretty similar to inhumeAddr, maybe unify? - root := false - var addrLocked oid.Address - addrLocked.SetContainer(idCnr) - addrLocked.SetObject(locked) - retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { - defer func() { - // if object is root we continue since information about it - // can be presented in other shards - if checkExists && root { - stop = false - } - }() - - if checkExists { - var existsPrm shard.ExistsPrm - existsPrm.Address = addrLocked - exRes, err := sh.Exists(ctx, existsPrm) - if err != nil { - var siErr *objectSDK.SplitInfoError - var eiErr *objectSDK.ECInfoError - if errors.As(err, &eiErr) { - eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) - if !ok { - return false - } - - err = sh.Lock(ctx, idCnr, locker, eclocked) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return false - } - root = true - return false - } else if !errors.As(err, &siErr) { - if shard.IsErrObjectExpired(err) { - // object is already expired => - // do not lock it - return true - } - e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return - } - - root = true - } else if !exRes.Exists() { - return - } - } - - err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked}) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - - var errIrregular *apistatus.LockNonRegularObject - if errors.As(err, &errIrregular) { - status = 1 - return true - } - return false - } - status = 2 - return true - }) - return -} - -func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err := 
objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return nil, false - } - eclocked = append(eclocked, objID) - } - return eclocked, true -} diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go deleted file mode 100644 index b8c9d6b1d..000000000 --- a/pkg/local_object_storage/engine/lock_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "testing" - "time" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/panjf2000/ants/v2" - "github.com/stretchr/testify/require" -) - -type tss struct { - expEpoch uint64 -} - -func (t tss) IsTombstoneAvailable(ctx context.Context, _ oid.Address, epoch uint64) bool { - return t.expEpoch >= epoch -} - -func TestLockUserScenario(t *testing.T) { - t.Parallel() - - // Tested user actions: - // 1. stores some object - // 2. locks the object - // 3. tries to inhume the object with tombstone and expects failure - // 4. saves tombstone for LOCK-object and receives error - // 5. waits for an epoch after the lock expiration one - // 6. tries to inhume the object and expects success - const lockerExpiresAfter = 13 - - cnr := cidtest.ID() - tombObj := testutil.GenerateObjectWithCID(cnr) - tombForLockID := oidtest.ID() - tombObj.SetID(tombForLockID) - - testEngine := testNewEngine(t). - setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option { - return []shard.Option{ - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - require.NoError(t, err) - - return pool - }), - shard.WithTombstoneSource(tss{lockerExpiresAfter}), - } - }). - prepare(t) - e := testEngine.engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - lockerID := oidtest.ID() - tombID := oidtest.ID() - var err error - - var objAddr oid.Address - objAddr.SetContainer(cnr) - - var tombAddr oid.Address - tombAddr.SetContainer(cnr) - tombAddr.SetObject(tombID) - - var lockerAddr oid.Address - lockerAddr.SetContainer(cnr) - lockerAddr.SetObject(lockerID) - - var a objectSDK.Attribute - a.SetKey(objectV2.SysAttributeExpEpoch) - a.SetValue(strconv.Itoa(lockerExpiresAfter)) - - lockerObj := testutil.GenerateObjectWithCID(cnr) - lockerObj.SetID(lockerID) - lockerObj.SetAttributes(a) - - var tombForLockAddr oid.Address - tombForLockAddr.SetContainer(cnr) - tombForLockAddr.SetObject(tombForLockID) - - // 1. - obj := testutil.GenerateObjectWithCID(cnr) - - id, _ := obj.ID() - objAddr.SetObject(id) - - err = Put(context.Background(), e, obj, false) - require.NoError(t, err) - - // 2. 
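	// Storing the LOCK object and then calling Lock below mirrors what the
	// put path is expected to do for LOCK objects: the object itself carries
	// the lock members for replication, while Lock records them in the metabase.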
- var locker objectSDK.Lock - locker.WriteMembers([]oid.ID{id}) - objectSDK.WriteLock(lockerObj, locker) - - err = Put(context.Background(), e, lockerObj, false) - require.NoError(t, err) - - err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id}) - require.NoError(t, err) - - // 3. - var inhumePrm InhumePrm - inhumePrm.WithTarget(tombAddr, objAddr) - - var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - // 4. - tombObj.SetType(objectSDK.TypeTombstone) - tombObj.SetID(tombForLockID) - tombObj.SetAttributes(a) - - err = Put(context.Background(), e, tombObj, false) - require.NoError(t, err) - - inhumePrm.WithTarget(tombForLockAddr, lockerAddr) - - err = e.Inhume(context.Background(), inhumePrm) - require.ErrorIs(t, err, meta.ErrLockObjectRemoval) - - // 5. - e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1) - - inhumePrm.WithTarget(tombAddr, objAddr) - - require.Eventually(t, func() bool { - err = e.Inhume(context.Background(), inhumePrm) - return err == nil - }, 30*time.Second, time.Second) -} - -func TestLockExpiration(t *testing.T) { - t.Parallel() - - // Tested scenario: - // 1. some object is stored - // 2. lock object for it is stored, and the object is locked - // 3. lock expiration epoch is coming - // 4. after some delay the object is not locked anymore - - testEngine := testNewEngine(t). - setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option { - return []shard.Option{ - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - require.NoError(t, err) - - return pool - }), - } - }). - prepare(t) - e := testEngine.engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - const lockerExpiresAfter = 13 - - cnr := cidtest.ID() - var err error - - // 1. - obj := testutil.GenerateObjectWithCID(cnr) - - err = Put(context.Background(), e, obj, false) - require.NoError(t, err) - - // 2. - var a objectSDK.Attribute - a.SetKey(objectV2.SysAttributeExpEpoch) - a.SetValue(strconv.Itoa(lockerExpiresAfter)) - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - lock.SetAttributes(a) - - err = Put(context.Background(), e, lock, false) - require.NoError(t, err) - - id, _ := obj.ID() - idLock, _ := lock.ID() - - err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id}) - require.NoError(t, err) - - var inhumePrm InhumePrm - tombAddr := oidtest.Address() - tombAddr.SetContainer(cnr) - inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) - - var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - // 3. - e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1) - - // 4. - tombAddr = oidtest.Address() - tombAddr.SetContainer(cnr) - inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) - - require.Eventually(t, func() bool { - err = e.Inhume(context.Background(), inhumePrm) - return err == nil - }, 30*time.Second, time.Second) -} - -func TestLockForceRemoval(t *testing.T) { - t.Parallel() - - // Tested scenario: - // 1. some object is stored - // 2. lock object for it is stored, and the object is locked - // 3. try to remove lock object and get error - // 4. force lock object removal - // 5. the object is not locked anymore - var e *StorageEngine - - e = testNewEngine(t). 
- setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option { - return []shard.Option{ - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - require.NoError(t, err) - - return pool - }), - shard.WithDeletedLockCallback(e.processDeletedLocks), - } - }). - prepare(t).engine - defer func() { require.NoError(t, e.Close(context.Background())) }() - - cnr := cidtest.ID() - var err error - - // 1. - obj := testutil.GenerateObjectWithCID(cnr) - - err = Put(context.Background(), e, obj, false) - require.NoError(t, err) - - // 2. - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - - err = Put(context.Background(), e, lock, false) - require.NoError(t, err) - - id, _ := obj.ID() - idLock, _ := lock.ID() - - err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id}) - require.NoError(t, err) - - // 3. - var inhumePrm InhumePrm - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - - var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) - - err = e.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - // 4. - var deletePrm DeletePrm - deletePrm.WithAddress(objectcore.AddressOf(lock)) - deletePrm.WithForceRemoval() - - require.NoError(t, e.Delete(context.Background(), deletePrm)) - - // 5. - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - - err = e.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) -} - -func TestLockExpiredRegularObject(t *testing.T) { - const currEpoch = 42 - const objectExpiresAfter = currEpoch - 1 - - engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { - return []shard.Option{ - shard.WithDisabledGC(), - shard.WithMetaBaseOptions(append( - testGetDefaultMetabaseOptions(t), - meta.WithEpochState(epochState{currEpoch}), - )...), - } - }).prepare(t).engine - - cnr := cidtest.ID() - - object := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) - - address := objectcore.AddressOf(object) - - var putPrm PutPrm - putPrm.Object = object - require.NoError(t, engine.Put(context.Background(), putPrm)) - - var getPrm GetPrm - var errNotFound *apistatus.ObjectNotFound - - getPrm.WithAddress(address) - _, err := engine.Get(context.Background(), getPrm) - require.ErrorAs(t, err, &errNotFound) - - t.Run("lock expired regular object", func(t *testing.T) { - engine.Lock(context.Background(), - address.Container(), - oidtest.ID(), - []oid.ID{address.Object()}, - ) - - res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object)) - require.NoError(t, err) - require.True(t, res) - }) - - t.Run("get expired and locked regular object", func(t *testing.T) { - getPrm.WithAddress(objectcore.AddressOf(object)) - - res, err := engine.Get(context.Background(), getPrm) - require.NoError(t, err) - require.Equal(t, res.Object(), object) - }) -} diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go deleted file mode 100644 index 963292d83..000000000 --- a/pkg/local_object_storage/engine/metrics.go +++ /dev/null @@ -1,93 +0,0 @@ -package engine - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -type ( - MetricRegister = 
metrics.EngineMetrics - GCMetrics = metrics.GCMetrics - WriteCacheMetrics = metrics.WriteCacheMetrics - NullBool = metrics.NullBool -) - -func elapsed(method string, addFunc func(method string, d time.Duration)) func() { - t := time.Now() - - return func() { - addFunc(method, time.Since(t)) - } -} - -type gcMetrics struct { - storage metrics.GCMetrics - shardID string -} - -func (m *gcMetrics) SetShardID(id string) { - m.shardID = id -} - -func (m *gcMetrics) AddRunDuration(d time.Duration, success bool) { - m.storage.AddRunDuration(m.shardID, d, success) -} - -func (m *gcMetrics) AddDeletedCount(deleted, failed uint64) { - m.storage.AddDeletedCount(m.shardID, deleted, failed) -} - -func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success bool, objectType string) { - m.storage.AddExpiredObjectCollectionDuration(m.shardID, d, success, objectType) -} - -func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) { - m.storage.AddInhumedObjectCount(m.shardID, count, objectType) -} - -type ( - noopMetrics struct{} - noopWriteCacheMetrics struct{} - noopGCMetrics struct{} -) - -var ( - _ MetricRegister = noopMetrics{} - _ WriteCacheMetrics = noopWriteCacheMetrics{} - _ GCMetrics = noopGCMetrics{} -) - -func (noopMetrics) AddMethodDuration(string, time.Duration) {} -func (noopMetrics) SetObjectCounter(string, string, uint64) {} -func (noopMetrics) AddToObjectCounter(string, string, int) {} -func (noopMetrics) SetMode(string, mode.Mode) {} -func (noopMetrics) AddToContainerSize(string, int64) {} -func (noopMetrics) DeleteContainerSize(string) {} -func (noopMetrics) DeleteContainerCount(string) {} -func (noopMetrics) AddToPayloadCounter(string, int64) {} -func (noopMetrics) IncErrorCounter(string) {} -func (noopMetrics) ClearErrorCounter(string) {} -func (noopMetrics) DeleteShardMetrics(string) {} -func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {} -func (noopMetrics) IncContainerObjectCounter(string, string, string) {} -func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {} -func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} -func (noopMetrics) SetRefillPercent(string, string, uint32) {} -func (noopMetrics) SetRefillStatus(string, string, string) {} -func (noopMetrics) SetEvacuationInProgress(string, bool) {} -func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} } -func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} } - -func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} -func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} -func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {} -func (noopWriteCacheMetrics) SetMode(string, string) {} -func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {} -func (noopWriteCacheMetrics) Close(string, string) {} - -func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {} -func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {} -func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {} -func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go deleted file mode 100644 index 10cf5ffd5..000000000 --- a/pkg/local_object_storage/engine/put.go +++ /dev/null @@ -1,186 +0,0 @@ -package engine - -import ( - "context" - 
"errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - Object *objectSDK.Object - IsIndexedContainer bool -} - -var errPutShard = errors.New("could not put object to any shard") - -type putToShardStatus byte - -const ( - putToShardUnknown putToShardStatus = iota - putToShardSuccess - putToShardExists - putToShardRemoved -) - -type putToShardRes struct { - status putToShardStatus - err error -} - -// Put saves the object to local storage. -// -// Returns any error encountered that -// did not allow to completely save the object. -// -// Returns an error if executions are blocked (see BlockExecution). -// -// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed. -func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put", - trace.WithAttributes( - attribute.String("address", object.AddressOf(prm.Object).EncodeToString()), - )) - defer span.End() - defer elapsed("Put", e.metrics.AddMethodDuration)() - - err = e.execIfNotBlocked(func() error { - err = e.put(ctx, prm) - return err - }) - - return -} - -func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { - addr := object.AddressOf(prm.Object) - - // In #1146 this check was parallelized, however, it became - // much slower on fast machines for 4 shards. - var ecParent oid.Address - if prm.Object.ECHeader() != nil { - ecParent.SetObject(prm.Object.ECHeader().Parent()) - ecParent.SetContainer(addr.Container()) - } - var shPrm shard.ExistsPrm - shPrm.Address = addr - shPrm.ECParentAddress = ecParent - existed, locked, err := e.exists(ctx, shPrm) - if err != nil { - return err - } - - if !existed && locked { - lockers, err := e.GetLocks(ctx, ecParent) - if err != nil { - return err - } - for _, locker := range lockers { - err = e.lock(ctx, addr.Container(), locker, []oid.ID{addr.Object()}) - if err != nil { - return err - } - } - } - - var shRes putToShardRes - if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { - e.mtx.RLock() - _, ok := e.shards[sh.ID().String()] - e.mtx.RUnlock() - if !ok { - // Shard was concurrently removed, skip. - return false - } - shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) - return shRes.status != putToShardUnknown - }); err != nil { - return err - } - switch shRes.status { - case putToShardUnknown: - return errPutShard - case putToShardRemoved: - return shRes.err - case putToShardExists, putToShardSuccess: - return nil - default: - return errPutShard - } -} - -// putToShard puts object to sh. -// Return putToShardStatus and error if it is necessary to propagate an error upper. 
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, - addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, -) (res putToShardRes) { - var existPrm shard.ExistsPrm - existPrm.Address = addr - - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it - res.status = putToShardExists - } else { - e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { - res.status = putToShardExists - return - } - - var putPrm shard.PutPrm - putPrm.SetObject(obj) - putPrm.SetIndexAttributes(isIndexedContainer) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - res.status = putToShardRemoved - res.err = err - return - } - - e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) - return - } - - res.status = putToShardSuccess - - return -} - -// Put writes provided object to local storage. -func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error { - return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer}) -} diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go deleted file mode 100644 index 7ec4742d8..000000000 --- a/pkg/local_object_storage/engine/range.go +++ /dev/null @@ -1,236 +0,0 @@ -package engine - -import ( - "context" - "errors" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// RngPrm groups the parameters of GetRange operation. -type RngPrm struct { - off, ln uint64 - - addr oid.Address -} - -// RngRes groups the resulting values of GetRange operation. -type RngRes struct { - obj *objectSDK.Object -} - -// WithAddress is a GetRng option to set the address of the requested object. -// -// Option is required. -func (p *RngPrm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// WithPayloadRange is a GetRange option to set range of requested payload data. -// -// Missing an option or calling with zero length is equivalent -// to getting the full payload range. -func (p *RngPrm) WithPayloadRange(rng *objectSDK.Range) { - p.off, p.ln = rng.GetOffset(), rng.GetLength() -} - -// Object returns the requested object part. 
-// -// Instance payload contains the requested range of the original object. -func (r RngRes) Object() *objectSDK.Object { - return r.obj -} - -// GetRange reads part of an object from local storage. -// -// Returns any error encountered that -// did not allow to completely read the object part. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in local storage. -// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object is inhumed. -// Returns ErrRangeOutOfBounds if the requested object range is out of bounds. -// -// Returns an error if executions are blocked (see BlockExecution). -func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.String("offset", strconv.FormatUint(prm.off, 10)), - attribute.String("length", strconv.FormatUint(prm.ln, 10)), - )) - defer span.End() - defer elapsed("GetRange", e.metrics.AddMethodDuration)() - - err = e.execIfNotBlocked(func() error { - res, err = e.getRange(ctx, prm) - return err - }) - - return -} - -func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) { - var shPrm shard.RngPrm - shPrm.SetAddress(prm.addr) - shPrm.SetRange(prm.off, prm.ln) - - it := &getRangeShardIterator{ - OutError: new(apistatus.ObjectNotFound), - ShardPrm: shPrm, - Address: prm.addr, - Engine: e, - } - - if err := it.tryGetWithMeta(ctx); err != nil { - return RngRes{}, err - } - - if it.SplitInfo != nil { - return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) - } - if it.ECInfo != nil { - return RngRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo)) - } - - if it.Object == nil { - // If any shard is in a degraded mode, we should assume that metabase could store - // info about some object. - if it.ShardWithMeta.Shard == nil && !it.HasDegraded || !client.IsErrObjectNotFound(it.OutError) { - return RngRes{}, it.OutError - } - - if err := it.tryGetFromBlobstor(ctx); err != nil { - return RngRes{}, err - } - - if it.Object == nil { - return RngRes{}, it.OutError - } - if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, - zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.Error(it.MetaError), - zap.Stringer("address", prm.addr)) - } - } - - return RngRes{ - obj: it.Object, - }, nil -} - -// GetRange reads object payload range from local storage by provided address. 
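//
// Example sketch (illustrative; addr is an object address prepared by the
// caller):
//
//	var rng objectSDK.Range
//	rng.SetOffset(0)
//	rng.SetLength(128)
//	payload, err := GetRange(ctx, e, addr, &rng)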
-func GetRange(ctx context.Context, storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([]byte, error) { - var rangePrm RngPrm - rangePrm.WithAddress(addr) - rangePrm.WithPayloadRange(rng) - - res, err := storage.GetRange(ctx, rangePrm) - if err != nil { - return nil, err - } - - return res.Object().Payload(), nil -} - -type getRangeShardIterator struct { - Object *objectSDK.Object - SplitInfoError *objectSDK.SplitInfoError - SplitInfo *objectSDK.SplitInfo - ECInfoError *objectSDK.ECInfoError - ECInfo *objectSDK.ECInfo - OutError error - ShardWithMeta hashedShard - MetaError error - HasDegraded bool - - ShardPrm shard.RngPrm - Address oid.Address - Engine *StorageEngine -} - -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { - noMeta := sh.GetMode().NoMetabase() - i.HasDegraded = i.HasDegraded || noMeta - i.ShardPrm.SetIgnoreMeta(noMeta) - - res, err := sh.GetRange(ctx, i.ShardPrm) - if err == nil { - i.Object = res.Object() - return true - } - - if res.HasMeta() { - i.ShardWithMeta = sh - i.MetaError = err - } - switch { - case client.IsErrObjectNotFound(err): - return false // ignore, go to next shard - case errors.As(err, &i.SplitInfoError): - if i.SplitInfo == nil { - i.SplitInfo = objectSDK.NewSplitInfo() - } - - util.MergeSplitInfo(i.SplitInfoError.SplitInfo(), i.SplitInfo) - - _, withLink := i.SplitInfo.Link() - _, withLast := i.SplitInfo.LastPart() - - // stop iterating over shards if SplitInfo structure is complete - return withLink && withLast - case errors.As(err, &i.ECInfoError): - if i.ECInfo == nil { - i.ECInfo = objectSDK.NewECInfo() - } - - util.MergeECInfo(i.ECInfoError.ECInfo(), i.ECInfo) - // stop iterating over shards if ECInfo structure is complete - return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total) - case - client.IsErrObjectAlreadyRemoved(err), - shard.IsErrOutOfRange(err): - i.OutError = err - - return true // stop, return it back - default: - i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) - return false - } - }) -} - -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { - // If the object is not found but is present in metabase, - // try to fetch it from blobstor directly. If it is found in any - // blobstor, increase the error counter for the shard which contains the meta. - i.ShardPrm.SetIgnoreMeta(true) - - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { - if sh.GetMode().NoMetabase() { - // Already processed it without a metabase. 
- return false - } - - res, err := sh.GetRange(ctx, i.ShardPrm) - if shard.IsErrOutOfRange(err) { - i.OutError = new(apistatus.ObjectOutOfRange) - return true - } - i.Object = res.Object() - return err == nil - }) -} diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go deleted file mode 100644 index a29dd7ed9..000000000 --- a/pkg/local_object_storage/engine/rebuild.go +++ /dev/null @@ -1,108 +0,0 @@ -package engine - -import ( - "context" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" -) - -type RebuildPrm struct { - ShardIDs []*shard.ID - ConcurrencyLimit uint32 - TargetFillPercent uint32 -} - -type ShardRebuildResult struct { - ShardID *shard.ID - Success bool - ErrorMsg string -} - -type RebuildRes struct { - ShardResults []ShardRebuildResult -} - -func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild", - trace.WithAttributes( - attribute.Int("shard_id_count", len(prm.ShardIDs)), - attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)), - attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)), - )) - defer span.End() - - res := RebuildRes{ - ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)), - } - resGuard := &sync.Mutex{} - - concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} - - eg, egCtx := errgroup.WithContext(ctx) - for _, shardID := range prm.ShardIDs { - eg.Go(func() error { - e.mtx.RLock() - sh, ok := e.shards[shardID.String()] - e.mtx.RUnlock() - - if !ok { - resGuard.Lock() - defer resGuard.Unlock() - res.ShardResults = append(res.ShardResults, ShardRebuildResult{ - ShardID: shardID, - ErrorMsg: errShardNotFound.Error(), - }) - return nil - } - - err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ - ConcurrencyLimiter: concLimiter, - TargetFillPercent: prm.TargetFillPercent, - }) - - resGuard.Lock() - defer resGuard.Unlock() - - if err != nil { - res.ShardResults = append(res.ShardResults, ShardRebuildResult{ - ShardID: shardID, - ErrorMsg: err.Error(), - }) - } else { - res.ShardResults = append(res.ShardResults, ShardRebuildResult{ - ShardID: shardID, - Success: true, - }) - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return RebuildRes{}, err - } - return res, nil -} - -type concurrencyLimiter struct { - semaphore chan struct{} -} - -func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { - select { - case l.semaphore <- struct{}{}: - return l.releaseWorkSlot, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -func (l *concurrencyLimiter) releaseWorkSlot() { - <-l.semaphore -} diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go deleted file mode 100644 index 8ab3c5217..000000000 --- a/pkg/local_object_storage/engine/remove_copies.go +++ /dev/null @@ -1,139 +0,0 @@ -package engine - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/hrw" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// errRemoveDuplicatesInProgress is returned when another rebalancing is in progress. -// We need it because `Rebalance` removes objects and executing it concurrently -// on 2 shards can lead to data loss. In future this restriction could be relaxed. -var errRemoveDuplicatesInProgress = errors.New("redundant copies removal is already in progress") - -const defaultRemoveDuplicatesConcurrency = 256 - -type RemoveDuplicatesPrm struct { - Concurrency int -} - -// RemoveDuplicates iterates over all objects and removes duplicate object copies -// from shards which are worse as defined by HRW sort. -// Safety: -// 1. Concurrent execution is prohibited, thus 1 object copy should always be left. -// 2. If we delete an object from another thread, this is not a problem. Currently, -// we have 2 thread that can remove "valid" (non-expired and logically non-removed) objects: -// policer and rebalance. For rebalance see (1). -// If policer removes something, we do not care if both copies are removed or one of them is left, -// as the remaining copy will be removed during the next policer iteration. -func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicatesPrm) error { - if !e.removeDuplicatesInProgress.CompareAndSwap(false, true) { - return errRemoveDuplicatesInProgress - } - defer e.removeDuplicatesInProgress.Store(false) - - if prm.Concurrency <= 0 { - prm.Concurrency = defaultRemoveDuplicatesConcurrency - } - - e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies, - zap.Int("concurrency", prm.Concurrency)) - - // The mutext must be taken for the whole duration to avoid target shard being removed - // concurrently: this can lead to data loss. - e.mtx.RLock() - defer e.mtx.RUnlock() - - // Iterate by shards to be sure that no objects from 2 different shards are removed simultaneously. - // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0. - // However we could change weights in future and easily forget this function. - for _, sh := range e.shards { - e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID())) - ch := make(chan oid.Address) - - errG, ctx := errgroup.WithContext(ctx) - errG.SetLimit(prm.Concurrency + 1) // +1 for the listing thread - - errG.Go(func() error { - defer close(ch) - - var cursor *meta.Cursor - for { - var listPrm shard.ListWithCursorPrm - listPrm.WithCount(uint32(prm.Concurrency)) - listPrm.WithCursor(cursor) - res, err := sh.ListWithCursor(ctx, listPrm) - if err != nil { - if errors.Is(err, meta.ErrEndOfListing) { - return nil - } - return err - } - for _, addr := range res.AddressList() { - select { - case <-ctx.Done(): - return ctx.Err() - case ch <- addr.Address: - } - } - cursor = res.Cursor() - } - }) - - for range prm.Concurrency { - errG.Go(func() error { - return e.removeObjects(ctx, ch) - }) - } - if err := errG.Wait(); err != nil { - e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) - return err - } - } - - e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies) - return nil -} - -// removeObjects reads addresses from ch and removes all objects from other shards, excluding excludeID. 
-func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address) error { - shards := make([]hashedShard, 0, len(e.shards)) - for _, sh := range e.shards { - shards = append(shards, sh) - } - - for addr := range ch { - h := hrw.StringHash(addr.EncodeToString()) - hrw.SortHasherSliceByValue(shards, h) - found := false - for i := range shards { - var existsPrm shard.ExistsPrm - existsPrm.Address = addr - - res, err := shards[i].Exists(ctx, existsPrm) - if err != nil { - return err - } else if !res.Exists() { - continue - } else if !found { - found = true - continue - } - - var deletePrm shard.DeletePrm - deletePrm.SetAddresses(addr) - _, err = shards[i].Delete(ctx, deletePrm) - if err != nil { - return err - } - } - } - return nil -} diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go deleted file mode 100644 index 6d2291c74..000000000 --- a/pkg/local_object_storage/engine/remove_copies_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package engine - -import ( - "context" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestRebalance(t *testing.T) { - t.Parallel() - - te := newEngineWithErrorThreshold(t, "", 0) - defer func() { - require.NoError(t, te.ng.Close(context.Background())) - }() - - const ( - objCount = 20 - copyCount = (objCount + 2) / 3 - ) - - type objectWithShard struct { - bestShard shard.ID - worstShard shard.ID - object *objectSDK.Object - } - - objects := make([]objectWithShard, objCount) - for i := range objects { - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - obj.SetPayload(make([]byte, errSmallSize)) - objects[i].object = obj - - shards := te.ng.sortShards(object.AddressOf(obj)) - objects[i].bestShard = *shards[0].Shard.ID() - objects[i].worstShard = *shards[1].Shard.ID() - } - - for i := range objects { - var prm shard.PutPrm - prm.SetObject(objects[i].object) - - var err1, err2 error - te.ng.mtx.RLock() - // Every 3rd object (i%3 == 0) is put to both shards, others are distributed. 
- if i%3 != 1 { - _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) - } - if i%3 != 2 { - _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) - } - te.ng.mtx.RUnlock() - - require.NoError(t, err1) - require.NoError(t, err2) - } - - var removedMtx sync.Mutex - var removed []deleteEvent - for _, shard := range te.shards { - id := *shard.id - shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) { - removedMtx.Lock() - removed = append(removed, deleteEvent{shardID: id, addr: prm.Address}) - removedMtx.Unlock() - return common.DeleteRes{}, nil - })) - } - - err := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{}) - require.NoError(t, err) - - require.Equal(t, copyCount, len(removed)) - - removedMask := make([]bool, len(objects)) -loop: - for i := range removed { - for j := range objects { - if removed[i].addr == object.AddressOf(objects[j].object) { - require.Equal(t, objects[j].worstShard, removed[i].shardID, - "object %d was expected to be removed from another shard", j) - removedMask[j] = true - continue loop - } - } - require.FailNow(t, "unexpected object was removed", removed[i].addr) - } - - for i := range copyCount { - if i%3 == 0 { - require.True(t, removedMask[i], "object %d was expected to be removed", i) - } else { - require.False(t, removedMask[i], "object %d was not expected to be removed", i) - } - } -} - -func TestRebalanceSingleThread(t *testing.T) { - t.Parallel() - - te := newEngineWithErrorThreshold(t, "", 0) - defer func() { - require.NoError(t, te.ng.Close(context.Background())) - }() - - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - obj.SetPayload(make([]byte, errSmallSize)) - - var prm shard.PutPrm - prm.SetObject(obj) - te.ng.mtx.RLock() - _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) - _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) - te.ng.mtx.RUnlock() - require.NoError(t, err1) - require.NoError(t, err2) - - signal := make(chan struct{}) // unblock rebalance - started := make(chan struct{}) // make sure rebalance is started - for _, shard := range te.shards { - shard.largeFileStorage.SetOption(teststore.WithDelete(func(common.DeletePrm) (common.DeleteRes, error) { - close(started) - <-signal - return common.DeleteRes{}, nil - })) - } - - var firstErr error - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - firstErr = te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{}) - }() - - <-started - secondErr := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{}) - require.ErrorIs(t, secondErr, errRemoveDuplicatesInProgress) - - close(signal) - wg.Wait() - require.NoError(t, firstErr) -} - -type deleteEvent struct { - shardID shard.ID - addr oid.Address -} - -func TestRebalanceExitByContext(t *testing.T) { - te := newEngineWithErrorThreshold(t, "", 0) - defer func() { - require.NoError(t, te.ng.Close(context.Background())) - }() - - objects := make([]*objectSDK.Object, 4) - for i := range objects { - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - obj.SetPayload(make([]byte, errSmallSize)) - objects[i] = obj - } - - for i := range objects { - var prm shard.PutPrm - prm.SetObject(objects[i]) - - te.ng.mtx.RLock() - _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) - _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) - 
te.ng.mtx.RUnlock() - - require.NoError(t, err1) - require.NoError(t, err2) - } - - var removed []deleteEvent - deleteCh := make(chan struct{}) - signal := make(chan struct{}) - for _, shard := range te.shards { - id := *shard.id - shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) { - deleteCh <- struct{}{} - <-signal - removed = append(removed, deleteEvent{shardID: id, addr: prm.Address}) - return common.DeleteRes{}, nil - })) - } - - ctx, cancel := context.WithCancel(context.Background()) - - var rebalanceErr error - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - rebalanceErr = te.ng.RemoveDuplicates(ctx, RemoveDuplicatesPrm{Concurrency: 1}) - }() - - const removeCount = 3 - for range removeCount - 1 { - <-deleteCh - signal <- struct{}{} - } - <-deleteCh - cancel() - close(signal) - - wg.Wait() - require.ErrorIs(t, rebalanceErr, context.Canceled) - require.Equal(t, removeCount, len(removed)) -} diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go deleted file mode 100644 index 4243a5481..000000000 --- a/pkg/local_object_storage/engine/select.go +++ /dev/null @@ -1,169 +0,0 @@ -package engine - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// SelectPrm groups the parameters of Select operation. -type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters - indexedContainer bool -} - -// SelectRes groups the resulting values of Select operation. -type SelectRes struct { - addrList []oid.Address -} - -// WithContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) { - p.cnr = cnr - p.indexedContainer = indexedContainer -} - -// WithFilters is a Select option to set the object filters. -func (p *SelectPrm) WithFilters(fs objectSDK.SearchFilters) { - p.filters = fs -} - -// AddressList returns list of addresses of the selected objects. -func (r SelectRes) AddressList() []oid.Address { - return r.addrList -} - -// Select selects the objects from local storage that match select parameters. -// -// Returns any error encountered that did not allow to completely select the objects. -// -// Returns an error if executions are blocked (see BlockExecution). 
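Before the implementation, a caller-side sketch of assembling these parameters. This is a hypothetical helper, not part of the original file; the "FileName" attribute is a placeholder, and the filter construction mirrors the benchmark in tree_test.go later in this diff:

```go
// selectByFilename shows the Select API surface from a caller's perspective.
func selectByFilename(ctx context.Context, e *StorageEngine, cnr cid.ID, name string) ([]oid.Address, error) {
	var fs objectSDK.SearchFilters
	fs.AddFilter("FileName", name, objectSDK.MatchStringEqual)

	var prm SelectPrm
	prm.WithContainerID(cnr, true) // true: the container maintains attribute indexes
	prm.WithFilters(fs)

	res, err := e.Select(ctx, prm)
	if err != nil {
		return nil, err // shard failure or blocked execution (see BlockExecution)
	}
	return res.AddressList(), nil
}
```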
-func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
-    ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select",
-        trace.WithAttributes(
-            attribute.String("container_id", prm.cnr.EncodeToString()),
-        ))
-    defer span.End()
-    defer elapsed("Select", e.metrics.AddMethodDuration)()
-
-    err = e.execIfNotBlocked(func() error {
-        var sErr error
-        res, sErr = e._select(ctx, prm)
-        return sErr
-    })
-
-    return
-}
-
-func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
-    addrList := make([]oid.Address, 0)
-    uniqueMap := make(map[string]struct{})
-
-    var shPrm shard.SelectPrm
-    shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
-    shPrm.SetFilters(prm.filters)
-
-    if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
-        res, err := sh.Select(ctx, shPrm)
-        if err != nil {
-            e.reportShardError(ctx, sh, "could not select objects from shard", err)
-            return false
-        }
-
-        for _, addr := range res.AddressList() { // save only unique values
-            if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
-                uniqueMap[addr.EncodeToString()] = struct{}{}
-                addrList = append(addrList, addr)
-            }
-        }
-
-        return false
-    }); err != nil {
-        return SelectRes{}, err
-    }
-
-    return SelectRes{
-        addrList: addrList,
-    }, nil
-}
-
-// List returns `limit` available physically stored object addresses in the engine.
-// If limit is zero, then returns all available object addresses.
-//
-// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) {
-    defer elapsed("List", e.metrics.AddMethodDuration)()
-    err = e.execIfNotBlocked(func() error {
-        var lErr error
-        res, lErr = e.list(ctx, limit)
-        return lErr
-    })
-
-    return
-}
-
-func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
-    addrList := make([]oid.Address, 0, limit)
-    uniqueMap := make(map[string]struct{})
-    ln := uint64(0)
-
-    // consider iterating over shuffled shards
-    if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
-        res, err := sh.List(ctx) // consider limit result of shard iterator
-        if err != nil {
-            e.reportShardError(ctx, sh, "could not select objects from shard", err)
-        } else {
-            for _, addr := range res.AddressList() { // save only unique values
-                if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
-                    uniqueMap[addr.EncodeToString()] = struct{}{}
-                    addrList = append(addrList, addr)
-
-                    ln++
-                    if limit > 0 && ln >= limit {
-                        return true
-                    }
-                }
-            }
-        }
-
-        return false
-    }); err != nil {
-        return SelectRes{}, err
-    }
-
-    return SelectRes{
-        addrList: addrList,
-    }, nil
-}
-
-// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
-    var selectPrm SelectPrm
-    selectPrm.WithContainerID(cnr, isIndexedContainer)
-    selectPrm.WithFilters(fs)
-
-    res, err := storage.Select(ctx, selectPrm)
-    if err != nil {
-        return nil, err
-    }
-
-    return res.AddressList(), nil
-}
-
-// List returns `limit` available physically stored object addresses in
-// the engine. If limit is zero, then returns all available object addresses.
-func List(ctx context.Context, storage *StorageEngine, limit uint64) ([]oid.Address, error) { - res, err := storage.List(ctx, limit) - if err != nil { - return nil, err - } - - return res.AddressList(), nil -} diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go deleted file mode 100644 index 69067c500..000000000 --- a/pkg/local_object_storage/engine/shards.go +++ /dev/null @@ -1,482 +0,0 @@ -package engine - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/hrw" - "github.com/google/uuid" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -var errShardNotFound = logicerr.New("shard not found") - -type hashedShard struct { - shardWrapper - hash uint64 -} - -type metricsWithID struct { - id string - mw MetricRegister -} - -func (m *metricsWithID) SetShardID(id string) { - // concurrent settings are not expected => - // no mutex protection - m.id = id -} - -func (m *metricsWithID) SetObjectCounter(objectType string, v uint64) { - m.mw.SetObjectCounter(m.id, objectType, v) -} - -func (m *metricsWithID) AddToObjectCounter(objectType string, delta int) { - m.mw.AddToObjectCounter(m.id, objectType, delta) -} - -func (m *metricsWithID) IncObjectCounter(objectType string) { - m.mw.AddToObjectCounter(m.id, objectType, +1) -} - -func (m *metricsWithID) SetMode(mode mode.Mode) { - m.mw.SetMode(m.id, mode) -} - -func (m *metricsWithID) AddToContainerSize(cnr string, size int64) { - m.mw.AddToContainerSize(cnr, size) -} - -func (m *metricsWithID) AddToPayloadSize(size int64) { - m.mw.AddToPayloadCounter(m.id, size) -} - -func (m *metricsWithID) IncErrorCounter() { - m.mw.IncErrorCounter(m.id) -} - -func (m *metricsWithID) ClearErrorCounter() { - m.mw.ClearErrorCounter(m.id) -} - -func (m *metricsWithID) DeleteShardMetrics() { - m.mw.DeleteShardMetrics(m.id) -} - -func (m *metricsWithID) SetContainerObjectsCount(cnrID string, objectType string, value uint64) { - m.mw.SetContainerObjectCounter(m.id, cnrID, objectType, value) -} - -func (m *metricsWithID) IncContainerObjectsCount(cnrID string, objectType string) { - m.mw.IncContainerObjectCounter(m.id, cnrID, objectType) -} - -func (m *metricsWithID) SubContainerObjectsCount(cnrID string, objectType string, value uint64) { - m.mw.SubContainerObjectCounter(m.id, cnrID, objectType, value) -} - -func (m *metricsWithID) IncRefillObjectsCount(path string, size int, success bool) { - m.mw.IncRefillObjectsCount(m.id, path, size, success) -} - -func (m *metricsWithID) SetRefillPercent(path string, percent uint32) { - m.mw.SetRefillPercent(m.id, path, percent) -} - -func (m *metricsWithID) SetRefillStatus(path string, status string) { - m.mw.SetRefillStatus(m.id, path, status) -} - -func (m *metricsWithID) SetEvacuationInProgress(value bool) { - m.mw.SetEvacuationInProgress(m.id, value) -} - -// AddShard adds a new shard to the storage engine. 
-// -// Returns any error encountered that did not allow adding a shard. -// Otherwise returns the ID of the added shard. -func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) { - sh, err := e.createShard(ctx, opts) - if err != nil { - return nil, fmt.Errorf("create a shard: %w", err) - } - - err = e.addShard(sh) - if err != nil { - return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) - } - - e.metrics.SetMode(sh.ID().String(), sh.GetMode()) - - return sh.ID(), nil -} - -func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) { - id, err := generateShardID() - if err != nil { - return nil, fmt.Errorf("generate shard ID: %w", err) - } - - opts = e.appendMetrics(id, opts) - - sh := shard.New(append(opts, - shard.WithID(id), - shard.WithExpiredTombstonesCallback(e.processExpiredTombstones), - shard.WithExpiredLocksCallback(e.processExpiredLocks), - shard.WithDeletedLockCallback(e.processDeletedLocks), - shard.WithReportErrorFunc(e.reportShardErrorByID), - shard.WithZeroSizeCallback(e.processZeroSizeContainers), - shard.WithZeroCountCallback(e.processZeroCountContainers), - )...) - - if err := sh.UpdateID(ctx); err != nil { - e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) - } - - return sh, nil -} - -func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard.Option { - e.mtx.RLock() - defer e.mtx.RUnlock() - - opts = append(opts, - shard.WithMetricsWriter( - &metricsWithID{ - id: id.String(), - mw: e.metrics, - }, - ), - shard.WithWriteCacheMetrics( - &writeCacheMetrics{ - shardID: id.String(), - metrics: e.metrics.WriteCache(), - }, - ), - shard.WithGCMetrics( - &gcMetrics{ - storage: e.metrics.GC(), - shardID: id.String(), - }, - ), - ) - - return opts -} - -func (e *StorageEngine) addShard(sh *shard.Shard) error { - e.mtx.Lock() - defer e.mtx.Unlock() - - strID := sh.ID().String() - if _, ok := e.shards[strID]; ok { - return fmt.Errorf("shard with id %s was already added", strID) - } - - e.shards[strID] = hashedShard{ - shardWrapper: shardWrapper{ - errorCount: new(atomic.Uint32), - Shard: sh, - }, - hash: hrw.StringHash(strID), - } - - return nil -} - -// removeShards removes specified shards. Skips non-existent shards. -// Logs errors about shards that it could not Close after the removal. 
-func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { - if len(ids) == 0 { - return - } - - ss := make([]hashedShard, 0, len(ids)) - - e.mtx.Lock() - for _, id := range ids { - sh, found := e.shards[id] - if !found { - continue - } - - e.metrics.DeleteShardMetrics(id) - - ss = append(ss, sh) - delete(e.shards, id) - - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, - zap.String("id", id)) - } - e.mtx.Unlock() - - for _, sh := range ss { - err := sh.SetMode(ctx, mode.Disabled) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, - zap.Stringer("id", sh.ID()), - zap.Error(err), - ) - } - err = sh.Close(ctx) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, - zap.Stringer("id", sh.ID()), - zap.Error(err), - ) - } - } -} - -func generateShardID() (*shard.ID, error) { - uid, err := uuid.NewRandom() - if err != nil { - return nil, err - } - - bin, err := uid.MarshalBinary() - if err != nil { - return nil, err - } - - return shard.NewIDFromBytes(bin), nil -} - -func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) []hashedShard { - e.mtx.RLock() - defer e.mtx.RUnlock() - - h := hrw.StringHash(objAddr.EncodeToString()) - shards := make([]hashedShard, 0, len(e.shards)) - for _, sh := range e.shards { - shards = append(shards, sh) - } - hrw.SortHasherSliceByValue(shards, h) - return shards -} - -func (e *StorageEngine) unsortedShards() []hashedShard { - e.mtx.RLock() - defer e.mtx.RUnlock() - - shards := make([]hashedShard, 0, len(e.shards)) - - for _, sh := range e.shards { - shards = append(shards, sh) - } - - return shards -} - -func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { - for i, sh := range e.sortShards(addr) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if handler(i, sh) { - break - } - } - return nil -} - -func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { - for _, sh := range e.unsortedShards() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if handler(sh) { - break - } - } - return nil -} - -// SetShardMode sets mode of the shard with provided identifier. -// -// Returns an error if shard mode was not set, or shard was not found in storage engine. -func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error { - e.mtx.RLock() - defer e.mtx.RUnlock() - - for shID, sh := range e.shards { - if id.String() == shID { - if resetErrorCounter { - sh.errorCount.Store(0) - e.metrics.ClearErrorCounter(shID) - } - return sh.SetMode(ctx, m) - } - } - - return errShardNotFound -} - -// HandleNewEpoch notifies every shard about NewEpoch event. -func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { - e.mtx.RLock() - defer e.mtx.RUnlock() - - for _, sh := range e.shards { - select { - case <-ctx.Done(): - return - case sh.NotificationChannel() <- epoch: - default: - e.log.Debug(ctx, logs.ShardEventProcessingInProgress, - zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) - } - } -} - -func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error { - if len(ids) == 0 { - return logicerr.New("ids must be non-empty") - } - - deletedShards, err := e.deleteShards(ctx, ids) - if err != nil { - return err - } - - return e.closeShards(ctx, deletedShards) -} - -// closeShards closes deleted shards. 
Tries to close all shards. -// Returns single error with joined shard errors. -func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error { - var multiErr error - var multiErrGuard sync.Mutex - var eg errgroup.Group - for _, sh := range deletedShards { - eg.Go(func() error { - err := sh.SetMode(ctx, mode.Disabled) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, - zap.Stringer("id", sh.ID()), - zap.Error(err), - ) - multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err)) - multiErrGuard.Unlock() - } - - err = sh.Close(ctx) - if err != nil { - e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, - zap.Stringer("id", sh.ID()), - zap.Error(err), - ) - multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err)) - multiErrGuard.Unlock() - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return err - } - return multiErr -} - -// deleteShards deletes shards with specified ids from engine shard list -// and releases all engine resources associated with shards. -// Returns deleted shards or error if some shard could not be deleted. -func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) { - ss := make([]hashedShard, 0, len(ids)) - - e.mtx.Lock() - defer e.mtx.Unlock() - - for _, id := range ids { - idStr := id.String() - sh, found := e.shards[idStr] - if !found { - return nil, errShardNotFound - } - ss = append(ss, sh) - } - - if len(ss) == len(e.shards) { - return nil, logicerr.New("could not delete all the shards") - } - - for _, sh := range ss { - idStr := sh.ID().String() - - e.metrics.DeleteShardMetrics(idStr) - - delete(e.shards, idStr) - - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, - zap.String("id", idStr)) - } - - return ss, nil -} - -func (s hashedShard) Hash() uint64 { - return s.hash -} - -func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { - var err error - var info []shard.Info - prm := shard.ExistsPrm{ - Address: obj, - } - var siErr *objectSDK.SplitInfoError - var ecErr *objectSDK.ECInfoError - - if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { - res, exErr := hs.Exists(ctx, prm) - if exErr != nil { - if client.IsErrObjectAlreadyRemoved(exErr) { - err = new(apistatus.ObjectAlreadyRemoved) - return true - } - - // Check if error is either SplitInfoError or ECInfoError. - // True means the object is virtual. 
- if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { - info = append(info, hs.DumpInfo()) - return false - } - - if shard.IsErrObjectExpired(exErr) { - err = exErr - return true - } - - if !client.IsErrObjectNotFound(exErr) { - e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) - } - - return false - } - if res.Exists() { - info = append(info, hs.DumpInfo()) - } - return false - }); itErr != nil { - return nil, itErr - } - return info, err -} diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go deleted file mode 100644 index 3aa9629b0..000000000 --- a/pkg/local_object_storage/engine/shards_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package engine - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/hrw" - "github.com/stretchr/testify/require" -) - -func TestRemoveShard(t *testing.T) { - const numOfShards = 6 - - te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) - e, ids := te.engine, te.shardIDs - defer func() { require.NoError(t, e.Close(context.Background())) }() - - require.Equal(t, numOfShards, len(e.shards)) - - removedNum := numOfShards / 2 - - mSh := make(map[string]bool, numOfShards) - for i, id := range ids { - if i == removedNum { - break - } - - mSh[id.String()] = true - } - - for id, remove := range mSh { - if remove { - e.removeShards(context.Background(), id) - } - } - - require.Equal(t, numOfShards-removedNum, len(e.shards)) - - for id, removed := range mSh { - _, ok := e.shards[id] - require.True(t, ok != removed) - } -} - -func TestDisableShards(t *testing.T) { - t.Parallel() - - const numOfShards = 2 - - te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) - e, ids := te.engine, te.shardIDs - defer func() { require.NoError(t, e.Close(context.Background())) }() - - require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical)) - - require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]})) - - require.Equal(t, 1, len(e.shards)) -} - -func TestSortShardsByWeight(t *testing.T) { - t.Parallel() - - const numOfShards = 500 - - var shards1 []hashedShard - var weights1 []float64 - var shards2 []hashedShard - for i := range numOfShards { - shards1 = append(shards1, hashedShard{ - hash: uint64(i), - }) - weights1 = append(weights1, 0) - shards2 = append(shards2, hashedShard{ - hash: uint64(i), - }) - } - - hrw.SortHasherSliceByWeightValue(shards1, weights1, 0) - hrw.SortHasherSliceByValue(shards2, 0) - - require.Equal(t, shards1, shards2) -} diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go deleted file mode 100644 index cfd15b4d4..000000000 --- a/pkg/local_object_storage/engine/tree.go +++ /dev/null @@ -1,456 +0,0 @@ -package engine - -import ( - "context" - "errors" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - 
"go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var _ pilorama.Forest = (*StorageEngine)(nil) - -// TreeMove implements the pilorama.Forest interface. -func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeMove", - trace.WithAttributes( - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, d.CID, treeID) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - return nil, err - } - - lm, err := lst[index].TreeMove(ctx, d, treeID, m) - if err != nil { - if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, - zap.Stringer("cid", d.CID), - zap.String("tree", treeID)) - } - - return nil, err - } - return lm, nil -} - -// TreeAddByPath implements the pilorama.Forest interface. -func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeAddByPath", - trace.WithAttributes( - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Int("meta_count", len(m)), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, d.CID, treeID) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - return nil, err - } - - lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m) - if err != nil { - if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, - zap.Stringer("cid", d.CID), - zap.String("tree", treeID)) - } - return nil, err - } - return lm, nil -} - -// TreeApply implements the pilorama.Forest interface. -func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApply", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.Bool("background", backgroundSync), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, cnr, treeID) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - return err - } - - err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync) - if err != nil { - if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, - zap.Stringer("cid", cnr), - zap.String("tree", treeID)) - } - return err - } - return nil -} - -// TreeApplyBatch implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, cnr, treeID) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - return err - } - - err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m) - if err != nil { - if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, - zap.Stringer("cid", cnr), - zap.String("tree", treeID)) - } - return err - } - return nil -} - -// TreeGetByPath implements the pilorama.Forest interface. -func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Bool("latest", latest), - ), - ) - defer span.End() - - var err error - var nodes []pilorama.Node - for _, sh := range e.sortShards(cid) { - nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return nodes, nil - } - return nil, err -} - -// TreeGetMeta implements the pilorama.Forest interface. -func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetMeta", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - var err error - var m pilorama.Meta - var p uint64 - for _, sh := range e.sortShards(cid) { - m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return m, p, nil - } - return pilorama.Meta{}, 0, err -} - -// TreeGetChildren implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]pilorama.NodeInfo, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetChildren", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - var err error - var nodes []pilorama.NodeInfo - for _, sh := range e.sortShards(cid) { - nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return nodes, nil - } - return nil, err -} - -// TreeSortedByFilename implements the pilorama.Forest interface. -func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - var err error - var nodes []pilorama.MultiNodeInfo - var cursor *pilorama.Cursor - for _, sh := range e.sortShards(cid) { - nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return nodes, cursor, nil - } - return nil, last, err -} - -// TreeGetOpLog implements the pilorama.Forest interface. -func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - var err error - var lm pilorama.Move - for _, sh := range e.sortShards(cid) { - lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return lm, nil - } - return lm, err -} - -// TreeDrop implements the pilorama.Forest interface. 
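An aside: the read methods above (TreeGetByPath, TreeGetMeta, TreeGetChildren, TreeSortedByFilename, TreeGetOpLog) repeat one loop shape: walk the HRW-sorted shards, treat ErrPiloramaDisabled as final, and skip shards that do not know the tree. A hypothetical generic helper capturing that shape, not part of the original file, which deliberately inlines the loop in each method (error reporting to reportShardError is omitted for brevity):

```go
// treeRead is a sketch of the shared fallback loop in the tree read methods.
func treeRead[T any](e *StorageEngine, cnr cidSDK.ID, read func(hashedShard) (T, error)) (T, error) {
	var last error
	for _, sh := range e.sortShards(cnr) {
		res, err := read(sh)
		if err != nil {
			if errors.Is(err, shard.ErrPiloramaDisabled) {
				// Pilorama is disabled by configuration: no other shard will help.
				var zero T
				return zero, err
			}
			last = err // typically pilorama.ErrTreeNotFound: try the next shard
			continue
		}
		return res, nil
	}
	var zero T
	return zero, last
}
```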
-func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - var err error - for _, sh := range e.sortShards(cid) { - err = sh.TreeDrop(ctx, cid, treeID) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { - e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return nil - } - return err -} - -// TreeList implements the pilorama.Forest interface. -func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeList", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - ), - ) - defer span.End() - - var resIDs []string - - for _, sh := range e.unsortedShards() { - ids, err := sh.TreeList(ctx, cid) - if err != nil { - if errors.Is(err, shard.ErrPiloramaDisabled) || errors.Is(err, shard.ErrReadOnlyMode) { - return nil, err - } - - e.reportShardError(ctx, sh, "can't perform `TreeList`", err, - zap.Stringer("cid", cid)) - - // returns as much info about - // trees as possible - continue - } - - resIDs = append(resIDs, ids...) - } - - return resIDs, nil -} - -// TreeExists implements the pilorama.Forest interface. -func (e *StorageEngine) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeExists", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - _, _, err := e.getTreeShard(ctx, cid, treeID) - if errors.Is(err, pilorama.ErrTreeNotFound) { - return false, nil - } - return err == nil, err -} - -func (e *StorageEngine) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, cid, treeID) - if err != nil { - return 0, nil - } - return lst[index].TreeHeight(ctx, cid, treeID) -} - -// TreeUpdateLastSyncHeight implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeUpdateLastSyncHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - index, lst, err := e.getTreeShard(ctx, cid, treeID) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - return err - } - - err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height) - if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - return err -} - -// TreeLastSyncHeight implements the pilorama.Forest interface. -func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeLastSyncHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - var err error - var height uint64 - for _, sh := range e.sortShards(cid) { - height, err = sh.TreeLastSyncHeight(ctx, cid, treeID) - if err != nil { - if err == shard.ErrPiloramaDisabled { - break - } - if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(ctx, sh, "can't read tree synchronization height", err, - zap.Stringer("cid", cid), - zap.String("tree", treeID)) - } - continue - } - return height, err - } - return height, err -} - -func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) { - lst := e.sortShards(cid) - for i, sh := range lst { - exists, err := sh.TreeExists(ctx, cid, treeID) - if err != nil { - return 0, nil, err - } - if exists { - return i, lst, err - } - } - - return 0, lst, pilorama.ErrTreeNotFound -} diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go deleted file mode 100644 index ea0a9e74e..000000000 --- a/pkg/local_object_storage/engine/tree_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func BenchmarkTreeVsSearch(b *testing.B) { - b.Run("10 objects", func(b *testing.B) { - benchmarkTreeVsSearch(b, 10) - }) - b.Run("100 objects", func(b *testing.B) { - benchmarkTreeVsSearch(b, 100) - }) - b.Run("1000 objects", func(b *testing.B) { - benchmarkTreeVsSearch(b, 1000) - }) -} - -func benchmarkTreeVsSearch(b *testing.B, objCount int) { - te := newEngineWithErrorThreshold(b, "", 0) - defer func() { - require.NoError(b, te.ng.Close(context.Background())) - }() - - cid := cidtest.ID() - d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1} - treeID := "someTree" - - for i := range objCount { - obj := testutil.GenerateObjectWithCID(cid) - testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) - err := Put(context.Background(), 
te.ng, obj, false) - if err != nil { - b.Fatal(err) - } - _, err = te.ng.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, nil, - []pilorama.KeyValue{{Key: pilorama.AttributeFilename, Value: []byte(strconv.Itoa(i))}}) - if err != nil { - b.Fatal(err) - } - } - - b.Run("search", func(b *testing.B) { - var prm SelectPrm - prm.WithContainerID(cid, true) - - var fs objectSDK.SearchFilters - fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) - prm.WithFilters(fs) - - for range b.N { - res, err := te.ng.Select(context.Background(), prm) - if err != nil { - b.Fatal(err) - } - if count := len(res.addrList); count != 1 { - b.Fatalf("expected 1 object, got %d", count) - } - } - }) - b.Run("TreeGetByPath", func(b *testing.B) { - for range b.N { - nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) - if err != nil { - b.Fatal(err) - } - if count := len(nodes); count != 1 { - b.Fatalf("expected 1 object, got %d", count) - } - } - }) -} diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go deleted file mode 100644 index e9ba3410f..000000000 --- a/pkg/local_object_storage/engine/writecache.go +++ /dev/null @@ -1,194 +0,0 @@ -package engine - -import ( - "context" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" -) - -// FlushWriteCachePrm groups the parameters of FlushWriteCache operation. -type FlushWriteCachePrm struct { - shardID *shard.ID - ignoreErrors bool - seal bool -} - -// SetShardID is an option to set shard ID. -// -// Option is required. -func (p *FlushWriteCachePrm) SetShardID(id *shard.ID) { - p.shardID = id -} - -// SetIgnoreErrors sets errors ignore flag. -func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - -// SetSeal sets seal flag. -func (p *FlushWriteCachePrm) SetSeal(v bool) { - p.seal = v -} - -// FlushWriteCacheRes groups the resulting values of FlushWriteCache operation. -type FlushWriteCacheRes struct{} - -// FlushWriteCache flushes write-cache on a single shard. 
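An aside before the single-shard flush below: SealWriteCache later in this file, like Rebuild earlier in this diff, fans one goroutine out per shard and turns shard-level failures into per-shard results rather than a joint error. A condensed, hypothetical sketch of that shape, not part of the original file:

```go
// forEachShard applies fn to every shard ID concurrently and collects
// per-shard outcomes under a mutex.
func forEachShard(ctx context.Context, ids []*shard.ID, fn func(context.Context, *shard.ID) error) []ShardSealResult {
	res := make([]ShardSealResult, 0, len(ids))
	var guard sync.Mutex

	eg, egCtx := errgroup.WithContext(ctx)
	for _, id := range ids {
		eg.Go(func() error {
			err := fn(egCtx, id)

			guard.Lock()
			defer guard.Unlock()
			if err != nil {
				res = append(res, ShardSealResult{ShardID: id, ErrorMsg: err.Error()})
			} else {
				res = append(res, ShardSealResult{ShardID: id, Success: true})
			}
			return nil // failures become results, so eg.Wait() stays nil
		})
	}
	_ = eg.Wait()
	return res
}
```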
-func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
-    ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
-        trace.WithAttributes(
-            attribute.String("shard_id", p.shardID.String()),
-            attribute.Bool("ignore_errors", p.ignoreErrors),
-            attribute.Bool("seal", p.seal),
-        ))
-    defer span.End()
-
-    e.mtx.RLock()
-    sh, ok := e.shards[p.shardID.String()]
-    e.mtx.RUnlock()
-
-    if !ok {
-        return FlushWriteCacheRes{}, errShardNotFound
-    }
-
-    var prm shard.FlushWriteCachePrm
-    prm.SetIgnoreErrors(p.ignoreErrors)
-    prm.SetSeal(p.seal)
-
-    return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
-}
-
-type SealWriteCachePrm struct {
-    ShardIDs     []*shard.ID
-    IgnoreErrors bool
-    Async        bool
-    RestoreMode  bool
-    Shrink       bool
-}
-
-type ShardSealResult struct {
-    ShardID  *shard.ID
-    Success  bool
-    ErrorMsg string
-}
-
-type SealWriteCacheRes struct {
-    ShardResults []ShardSealResult
-}
-
-// SealWriteCache flushes all data to the blobstore and moves the write-cache to degraded read-only mode.
-func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePrm) (SealWriteCacheRes, error) {
-    ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.SealWriteCache",
-        trace.WithAttributes(
-            attribute.Int("shard_id_count", len(prm.ShardIDs)),
-            attribute.Bool("ignore_errors", prm.IgnoreErrors),
-            attribute.Bool("restore_mode", prm.RestoreMode),
-        ))
-    defer span.End()
-
-    res := SealWriteCacheRes{
-        ShardResults: make([]ShardSealResult, 0, len(prm.ShardIDs)),
-    }
-    resGuard := &sync.Mutex{}
-
-    eg, egCtx := errgroup.WithContext(ctx)
-    for _, shardID := range prm.ShardIDs {
-        eg.Go(func() error {
-            e.mtx.RLock()
-            sh, ok := e.shards[shardID.String()]
-            e.mtx.RUnlock()
-
-            if !ok {
-                resGuard.Lock()
-                defer resGuard.Unlock()
-                res.ShardResults = append(res.ShardResults, ShardSealResult{
-                    ShardID:  shardID,
-                    ErrorMsg: errShardNotFound.Error(),
-                })
-                return nil
-            }
-
-            err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, Async: prm.Async, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
-
-            resGuard.Lock()
-            defer resGuard.Unlock()
-
-            if err != nil {
-                res.ShardResults = append(res.ShardResults, ShardSealResult{
-                    ShardID:  shardID,
-                    ErrorMsg: err.Error(),
-                })
-            } else {
-                res.ShardResults = append(res.ShardResults, ShardSealResult{
-                    ShardID: shardID,
-                    Success: true,
-                })
-            }
-            return nil
-        })
-    }
-
-    if err := eg.Wait(); err != nil {
-        return SealWriteCacheRes{}, err
-    }
-    return res, nil
-}
-
-type writeCacheMetrics struct {
-    shardID string
-    metrics metrics.WriteCacheMetrics
-    path    string
-}
-
-func (m *writeCacheMetrics) SetPath(path string) {
-    m.path = path
-}
-
-func (m *writeCacheMetrics) SetShardID(id string) {
-    m.shardID = id
-}
-
-func (m *writeCacheMetrics) Get(d time.Duration, success bool, st writecache.StorageType) {
-    m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Get", success, d)
-}
-
-func (m *writeCacheMetrics) Delete(d time.Duration, success bool, st writecache.StorageType) {
-    m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Delete", success, d)
-}
-
-func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.StorageType) {
-    m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d)
-}
-
-func (m *writeCacheMetrics) SetEstimateSize(size uint64) {
-    m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size)
-}
-
-func (m *writeCacheMetrics) SetMode(mod
mode.ComponentMode) { - m.metrics.SetMode(m.shardID, mod.String()) -} - -func (m *writeCacheMetrics) SetActualCounters(count uint64) { - m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count) -} - -func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) { - m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Flush", metrics.NullBool{Bool: success, Valid: true}) -} - -func (m *writeCacheMetrics) Evict(st writecache.StorageType) { - m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Evict", metrics.NullBool{}) -} - -func (m *writeCacheMetrics) Close() { - m.metrics.Close(m.shardID, m.path) -} diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go deleted file mode 100644 index 6b101fa60..000000000 --- a/pkg/local_object_storage/internal/log/log.go +++ /dev/null @@ -1,36 +0,0 @@ -package storagelog - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Write writes message about storage engine's operation to logger. -func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) { - logger.Debug(ctx, logs.StorageOperation, fields...) -} - -// AddressField returns logger's field for object address. -// -// Address should be type of *object.Address or string. -func AddressField(addr any) zap.Field { - return zap.Any("address", addr) -} - -// OpField returns logger's field for operation type. -func OpField(op string) zap.Field { - return zap.String("op", op) -} - -// StorageTypeField returns logger's field for storage type. -func StorageTypeField(typ string) zap.Field { - return zap.String("type", typ) -} - -// StorageIDField returns logger's field for storage ID. -func StorageIDField(id []byte) zap.Field { - return zap.String("storage_id", string(id)) -} diff --git a/pkg/local_object_storage/internal/metaerr/error.go b/pkg/local_object_storage/internal/metaerr/error.go deleted file mode 100644 index 41b8504bc..000000000 --- a/pkg/local_object_storage/internal/metaerr/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package metaerr - -import "errors" - -// Error is a wrapper for SSD-related errors. -// In our model it unites metabase, pilorama and write-cache errors. -type Error struct { - err error -} - -// New returns simple error with a provided error message. -func New(msg string) Error { - return Error{err: errors.New(msg)} -} - -// Error implements the error interface. -func (e Error) Error() string { - return e.err.Error() -} - -// Wrap wraps arbitrary error. -// Returns nil if err == nil. -func Wrap(err error) error { - if err != nil { - return Error{err: err} - } - return nil -} - -// Unwrap returns underlying error. 
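An aside on why Unwrap (below) matters: it lets errors.Is and errors.As see through the wrapper, so callers keep matching the root cause while components can still classify metabase, pilorama, and write-cache errors by type. A sketch, assuming use inside this module (the package is internal); fs.ErrNotExist stands in for any underlying error:

```go
package storage_test // illustration only

import (
	"errors"
	"fmt"
	"io/fs"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
)

func ExampleWrap() {
	wrapped := metaerr.Wrap(fs.ErrNotExist)

	var me metaerr.Error
	fmt.Println(errors.As(wrapped, &me))            // media classification still works
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // and the root cause stays visible
	// Output:
	// true
	// true
}
```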
-func (e Error) Unwrap() error { - return e.err -} diff --git a/pkg/local_object_storage/internal/metaerr/error_test.go b/pkg/local_object_storage/internal/metaerr/error_test.go deleted file mode 100644 index acde48793..000000000 --- a/pkg/local_object_storage/internal/metaerr/error_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package metaerr - -import ( - "errors" - "fmt" - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestError(t *testing.T) { - t.Run("errors.Is", func(t *testing.T) { - e1 := errors.New("some error") - ee := Wrap(e1) - require.ErrorIs(t, ee, e1) - - e2 := fmt.Errorf("wrap: %w", e1) - ee = Wrap(e2) - require.ErrorIs(t, ee, e1) - require.ErrorIs(t, ee, e2) - - require.Equal(t, errors.Unwrap(ee), e2) - }) - - t.Run("errors.As", func(t *testing.T) { - e1 := testError{42} - ee := Wrap(e1) - - { - var actual testError - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1.data, actual.data) - } - { - var actual Error - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1, actual.err) - } - - e2 := fmt.Errorf("wrap: %w", e1) - ee = Wrap(e2) - - { - var actual testError - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1.data, actual.data) - } - }) -} - -func TestNilWrap(t *testing.T) { - require.NoError(t, Wrap(nil)) -} - -func TestErrorMessage(t *testing.T) { - msg := "sth to report" - err := New(msg) - require.Contains(t, err.Error(), msg) -} - -type testError struct { - data uint64 -} - -func (e testError) Error() string { - return strconv.FormatUint(e.data, 10) -} diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go deleted file mode 100644 index d46365296..000000000 --- a/pkg/local_object_storage/internal/storagetest/storage.go +++ /dev/null @@ -1,122 +0,0 @@ -package storagetest - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -// Component represents single storage component. -type Component interface { - Open(context.Context, mode.Mode) error - SetMode(context.Context, mode.Mode) error - Init(context.Context) error - Close(context.Context) error -} - -// Constructor constructs storage component. -// Each call must create a component using different file-system path. -type Constructor = func(t *testing.T) Component - -// TestAll checks that storage component doesn't panic under -// any circumstances during shard operation. -func TestAll(t *testing.T, cons Constructor) { - modes := []mode.Mode{ - mode.ReadWrite, - mode.ReadOnly, - mode.Degraded, - mode.DegradedReadOnly, - } - - t.Run("close after open", func(t *testing.T) { - TestCloseAfterOpen(t, cons) - }) - t.Run("close twice", func(t *testing.T) { - TestCloseTwice(t, cons) - }) - t.Run("set mode", func(t *testing.T) { - for _, m := range modes { - t.Run(m.String(), func(t *testing.T) { - TestSetMode(t, cons, m) - }) - } - }) - t.Run("mode transition", func(t *testing.T) { - for _, from := range modes { - for _, to := range modes { - TestModeTransition(t, cons, from, to) - } - } - }) -} - -// TestCloseAfterOpen checks that `Close` can be done right after `Open`. -// Use-case: open shard, encounter error, close before the initialization. -func TestCloseAfterOpen(t *testing.T, cons Constructor) { - t.Run("RW", func(t *testing.T) { - // Use-case: irrecoverable error on some components, close everything. 
-        s := cons(t)
-        require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-        require.NoError(t, s.Close(context.Background()))
-    })
-    t.Run("RO", func(t *testing.T) {
-        // Use-case: irrecoverable error on some components, close everything.
-        // Open in read-only must be done after the db is here.
-        s := cons(t)
-        require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-        require.NoError(t, s.Init(context.Background()))
-        require.NoError(t, s.Close(context.Background()))
-
-        require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
-        require.NoError(t, s.Close(context.Background()))
-    })
-}
-
-// TestCloseTwice checks that `Close` can be done twice.
-func TestCloseTwice(t *testing.T, cons Constructor) {
-    // Use-case: move to maintenance mode twice, first time failed.
-    s := cons(t)
-    require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-    require.NoError(t, s.Init(context.Background()))
-    require.NoError(t, s.Close(context.Background()))
-    require.NoError(t, s.Close(context.Background())) // already closed, no-op
-}
-
-// TestSetMode checks that any mode transition can be done safely.
-func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) {
-    t.Run("before init", func(t *testing.T) {
-        // Use-case: metabase `Init` failed,
-        // call `SetMode` on all not-yet-initialized components.
-        s := cons(t)
-        require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-        require.NoError(t, s.SetMode(context.Background(), m))
-
-        t.Run("after open in RO", func(t *testing.T) {
-            require.NoError(t, s.Close(context.Background()))
-            require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
-            require.NoError(t, s.SetMode(context.Background(), m))
-        })
-
-        require.NoError(t, s.Close(context.Background()))
-    })
-    t.Run("after init", func(t *testing.T) {
-        s := cons(t)
-        // Use-case: normal node operation.
-        require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-        require.NoError(t, s.Init(context.Background()))
-        require.NoError(t, s.SetMode(context.Background(), m))
-        require.NoError(t, s.Close(context.Background()))
-    })
-}
-
-func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) {
-    // Use-case: normal node operation.
-    s := cons(t)
-    require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
-    require.NoError(t, s.Init(context.Background()))
-    require.NoError(t, s.SetMode(context.Background(), from))
-    require.NoError(t, s.SetMode(context.Background(), to))
-    require.NoError(t, s.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
deleted file mode 100644
index 52b199b0b..000000000
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package testutil
-
-import (
-    cryptorand "crypto/rand"
-    "encoding/binary"
-    "math/rand"
-    "sync/atomic"
-    "testing"
-
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    "github.com/stretchr/testify/require"
-)
-
-// AddressGenerator is the interface of types that generate object addresses.
-type AddressGenerator interface {
-    Next() oid.Address
-}
-
-// SeqAddrGenerator is an AddressGenerator that generates addresses sequentially and wraps around the given max ID.
-type SeqAddrGenerator struct {
- cnt atomic.Uint64
- MaxID uint64
-}
-
-var _ AddressGenerator = &SeqAddrGenerator{}
-
-func (g *SeqAddrGenerator) Next() oid.Address {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], ((g.cnt.Add(1)-1)%g.MaxID)+1)
- var addr oid.Address
- addr.SetContainer(cid.ID{})
- addr.SetObject(id)
- return addr
-}
-
-// RandAddrGenerator is an AddressGenerator that generates random addresses in the given range.
-type RandAddrGenerator uint64
-
-func (g RandAddrGenerator) Next() oid.Address {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], uint64(1+int(rand.Int63n(int64(g)))))
- var addr oid.Address
- addr.SetContainer(cid.ID{})
- addr.SetObject(id)
- return addr
-}
-
-// ObjectGenerator is the interface of types that generate object entries.
-type ObjectGenerator interface {
- Next() *objectSDK.Object
-}
-
-// SeqObjGenerator is an ObjectGenerator that generates entries with random payloads of size objSize and sequential IDs.
-type SeqObjGenerator struct {
- cnt atomic.Uint64
- ObjSize uint64
-}
-
-var _ ObjectGenerator = &SeqObjGenerator{}
-
-func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
- data := make([]byte, sz)
- _, _ = cryptorand.Read(data)
- obj := GenerateObjectWithCIDWithPayload(cid, data)
- obj.SetID(oid)
- return obj
-}
-
-func (g *SeqObjGenerator) Next() *objectSDK.Object {
- var id oid.ID
- binary.LittleEndian.PutUint64(id[:], g.cnt.Add(1))
- return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
-}
-
-// RandObjGenerator is an ObjectGenerator that generates entries with random IDs and payloads of size objSize.
-type RandObjGenerator struct {
- ObjSize uint64
-}
-
-var _ ObjectGenerator = &RandObjGenerator{}
-
-func (g *RandObjGenerator) Next() *objectSDK.Object {
- var id oid.ID
- _, _ = cryptorand.Read(id[:])
- return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
-}
-
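These generators exist to feed storage benchmarks and tests with objects of a fixed payload size. A hypothetical consumer, sketched under the assumption that `BenchmarkFill` is illustrative and only `SeqObjGenerator`, `AddressFromObject` and their semantics come from the code above:

```go
package testutil_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
)

// BenchmarkFill shows the intended use: one generator per benchmark, one
// Next() call per iteration. Sequential IDs give a reproducible key order;
// RandObjGenerator would exercise random insertion instead.
func BenchmarkFill(b *testing.B) {
	gen := &testutil.SeqObjGenerator{ObjSize: 1024} // 1 KiB random payload, IDs 1, 2, 3, ...
	for i := 0; i < b.N; i++ {
		obj := gen.Next()
		addr := testutil.AddressFromObject(b, obj)
		_ = addr // a real benchmark would store obj and later fetch it by addr
	}
}
```

-
-// OverwriteObjGenerator is an ObjectGenerator that generates entries with random payloads of size objSize and at most maxObjects distinct IDs.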
-type OverwriteObjGenerator struct { - ObjSize uint64 - MaxObjects uint64 -} - -func (g *OverwriteObjGenerator) Next() *objectSDK.Object { - var id oid.ID - binary.LittleEndian.PutUint64(id[:], uint64(1+rand.Int63n(int64(g.MaxObjects)))) - return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) -} - -func AddressFromObject(t testing.TB, obj *objectSDK.Object) oid.Address { - var addr oid.Address - - id, isSet := obj.ID() - require.True(t, isSet, "object ID is not set") - addr.SetObject(id) - - cid, isSet := obj.ContainerID() - require.True(t, isSet, "container ID is not set") - addr.SetContainer(cid) - - return addr -} diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go deleted file mode 100644 index cc6f726a4..000000000 --- a/pkg/local_object_storage/internal/testutil/generators_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package testutil - -import ( - "encoding/binary" - "slices" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestOverwriteObjGenerator(t *testing.T) { - gen := &OverwriteObjGenerator{ - ObjSize: 10, - MaxObjects: 4, - } - for range 40 { - obj := gen.Next() - id, isSet := obj.ID() - i := binary.LittleEndian.Uint64(id[:]) - - require.True(t, isSet) - require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) - require.True(t, 1 <= i && i <= gen.MaxObjects) - } -} - -func TestRandObjGenerator(t *testing.T) { - gen := &RandObjGenerator{ObjSize: 10} - for range 10 { - obj := gen.Next() - - require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) - } -} - -func TestSeqObjGenerator(t *testing.T) { - gen := &SeqObjGenerator{ObjSize: 10} - var addrs []string - for i := 1; i <= 10; i++ { - obj := gen.Next() - id, isSet := obj.ID() - addrs = append(addrs, AddressFromObject(t, obj).EncodeToString()) - - require.True(t, isSet) - require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) - require.Equal(t, uint64(i), binary.LittleEndian.Uint64(id[:])) - } - require.True(t, slices.IsSorted(addrs)) -} - -func TestRandAddrGenerator(t *testing.T) { - gen := RandAddrGenerator(5) - for range 50 { - addr := gen.Next() - id := addr.Object() - k := binary.LittleEndian.Uint64(id[:]) - - require.True(t, 1 <= k && k <= uint64(gen)) - } -} - -func TestSeqAddrGenerator(t *testing.T) { - gen := &SeqAddrGenerator{MaxID: 10} - for i := 1; i <= 20; i++ { - addr := gen.Next() - id := addr.Object() - - require.Equal(t, uint64((i-1)%int(gen.MaxID)+1), binary.LittleEndian.Uint64(id[:])) - } -} diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go deleted file mode 100644 index 1087e40be..000000000 --- a/pkg/local_object_storage/internal/testutil/object.go +++ /dev/null @@ -1,75 +0,0 @@ -package testutil - -import ( - "crypto/rand" - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "git.frostfs.info/TrueCloudLab/tzhash/tz" -) - -const defaultDataSize = 32 - -func GenerateObject() *objectSDK.Object { - return GenerateObjectWithCID(cidtest.ID()) -} - -func GenerateObjectWithSize(sz int) *objectSDK.Object { - 
data := make([]byte, sz)
- _, _ = rand.Read(data)
- return GenerateObjectWithCIDWithPayload(cidtest.ID(), data)
-}
-
-func GenerateObjectWithCID(cnr cid.ID) *objectSDK.Object {
- data := make([]byte, defaultDataSize)
- _, _ = rand.Read(data)
- return GenerateObjectWithCIDWithPayload(cnr, data)
-}
-
-func GenerateObjectWithCIDWithPayload(cnr cid.ID, data []byte) *objectSDK.Object {
- var ver version.Version
- ver.SetMajor(2)
- ver.SetMinor(1)
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(data))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := objectSDK.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cnr)
- obj.SetVersion(&ver)
- obj.SetPayload(data)
- obj.SetPayloadSize(uint64(len(data)))
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
-
- return obj
-}
-
-func AddAttribute(obj *objectSDK.Object, key, val string) {
- var attr objectSDK.Attribute
- attr.SetKey(key)
- attr.SetValue(val)
-
- attrs := obj.Attributes()
- attrs = append(attrs, attr)
- obj.SetAttributes(attrs...)
-}
-
-func AddPayload(obj *objectSDK.Object, size int) {
- buf := make([]byte, size)
- _, _ = rand.Read(buf)
-
- obj.SetPayload(buf)
- obj.SetPayloadSize(uint64(size))
-}
diff --git a/pkg/local_object_storage/metabase/VERSION.md b/pkg/local_object_storage/metabase/VERSION.md
deleted file mode 100644
index 9cfc95332..000000000
--- a/pkg/local_object_storage/metabase/VERSION.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# Metabase versioning
-
-This file describes changes between the metabase versions.
-
-Warning: database schema below is outdated and incomplete, see source code.
-
-## Current
-
-### Primary buckets
-- Graveyard bucket
- - Name: `_Graveyard`
 - Key: object address
 - Value: tombstone address
-- Garbage bucket
- - Name: `_Garbage`
 - Key: object address
 - Value: dummy value
-- Bucket containing IDs of objects that are candidates for moving
- to another shard.
- - Name: `_ToMoveIt`
 - Key: object address
 - Value: dummy value
-- Container volume bucket
- - Name: `_ContainerSize`
 - Key: container ID
 - Value: container size in bytes as little-endian uint64
-- Bucket for storing locked objects information
- - Name: `_Locked`
 - Key: container ID
 - Value: bucket mapping objects locked to the list of corresponding LOCK objects
-- Bucket containing auxiliary information.
All keys are custom and are not connected to the container
- - Name: `_i`
- - Keys and values
- - `id` -> shard id as bytes
- - `version` -> metabase version as little-endian uint64
- - `phy_counter` -> shard's physical object counter as little-endian uint64
- - `logic_counter` -> shard's logical object counter as little-endian uint64
-
-### Unique index buckets
-- Buckets containing objects of REGULAR type
- - Name: container ID
- - Key: object ID
- - Value: marshaled object
-- Buckets containing objects of LOCK type
- - Name: container ID + `_LOCKER`
- - Key: object ID
- - Value: marshaled object
-- Buckets containing objects of STORAGEGROUP type
- - Name: container ID + `_SG`
- - Key: object ID
- - Value: marshaled object
-- Buckets containing objects of TOMBSTONE type
- - Name: container ID + `_TS`
- - Key: object ID
- - Value: marshaled object
-- Buckets mapping objects to the storage ID they are stored in
- - Name: container ID + `_small`
- - Key: object ID
- - Value: storage ID
-- Buckets for mapping parent object to the split info
- - Name: container ID + `_root`
- - Key: object ID
- - Value: split info
-
-### FKBT index buckets
-- Buckets mapping owner to object IDs
- - Name: container ID + `_ownerid`
- - Key: owner ID as base58 string
- - Value: bucket containing object IDs as keys
-- Buckets containing objects attributes indexes
- - Name: container ID + `_attr_` + attribute key
- - Key: attribute value
- - Value: bucket containing object IDs as keys
-
-### List index buckets
-- Buckets mapping payload hash to a list of object IDs
- - Name: container ID + `_payloadhash`
- - Key: payload hash
- - Value: list of object IDs
-- Buckets mapping parent ID to a list of children IDs
- - Name: container ID + `_parent`
- - Key: parent ID
- - Value: list of children object IDs
-- Buckets mapping split ID to a list of object IDs
- - Name: container ID + `_splitid`
- - Key: split ID
- - Value: list of object IDs
-
-# History
-
-## Version 3
-
-- Payload hash, owner ID and FKBT buckets deleted
-- Expiration epoch to object ID and object ID to expiration epoch added
-
-## Version 2
-
-- Container ID is encoded as 32-byte slice
-- Object ID is encoded as 32-byte slice
-- Address is encoded as 64-byte slice, container ID + object ID
-- Bucket naming scheme is changed:
- - container ID + suffix -> 1-byte prefix + container ID
-
-## Version 1
-
-- Metabase now stores a generic storage ID instead of the blobovnicza ID.
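The version-2 bucket naming scheme above (1-byte type prefix followed by the 32-byte container ID) can be sketched as follows. The concrete prefix value below is illustrative, not the real one; only the lengths and the layout come from the document:

```go
package main

import "fmt"

const (
	cidSize       = 32          // container ID length in bytes (version 2+)
	bucketKeySize = cidSize + 1 // 1-byte type prefix + container ID
)

// bucketName builds a version-2+ bucket name: prefix byte, then the raw CID.
func bucketName(prefix byte, cnr [cidSize]byte) []byte {
	name := make([]byte, bucketKeySize)
	name[0] = prefix
	copy(name[1:], cnr[:])
	return name
}

func main() {
	var cnr [cidSize]byte       // a real container ID would be decoded from base58
	const primaryPrefix = 0x12  // hypothetical prefix for the primary bucket
	fmt.Printf("% x\n", bucketName(primaryPrefix, cnr))
}
```
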
-
-## Version 0
-
-- Container ID is encoded as base58 string
-- Object ID is encoded as base58 string
-- Address is encoded as container ID + "/" + object ID
diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go
deleted file mode 100644
index de1479e6f..000000000
--- a/pkg/local_object_storage/metabase/bucket_cache.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package meta
-
-import (
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "go.etcd.io/bbolt"
-)
-
-type bucketCache struct {
- locked *bbolt.Bucket
- graveyard *bbolt.Bucket
- garbage *bbolt.Bucket
- expired map[cid.ID]*bbolt.Bucket
- primary map[cid.ID]*bbolt.Bucket
-}
-
-func newBucketCache() *bucketCache {
- return &bucketCache{}
-}
-
-func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(bucketNameLocked)
- }
- return getBucket(&bc.locked, tx, bucketNameLocked)
-}
-
-func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(graveyardBucketName)
- }
- return getBucket(&bc.graveyard, tx, graveyardBucketName)
-}
-
-func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(garbageBucketName)
- }
- return getBucket(&bc.garbage, tx, garbageBucketName)
-}
-
-func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
- if *cache != nil {
- return *cache
- }
-
- *cache = tx.Bucket(name)
- return *cache
-}
-
-func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
-}
-
-func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = primaryBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
-}
-
-func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
- value, ok := (*m)[cnr]
- if ok {
- return value
- }
-
- if *m == nil {
- *m = make(map[cid.ID]*bbolt.Bucket, 1)
- }
-
- bucketName := make([]byte, bucketKeySize)
- bucketName = nameFunc(cnr, bucketName)
- (*m)[cnr] = getBucket(&value, tx, bucketName)
- return value
-}
diff --git a/pkg/local_object_storage/metabase/children.go b/pkg/local_object_storage/metabase/children.go
deleted file mode 100644
index acd367951..000000000
--- a/pkg/local_object_storage/metabase/children.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package meta
-
-import (
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
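Before `GetChildren` below, a note on `bucket_cache.go` above: its point is that `tx.Bucket` lookups are resolved at most once per transaction, while a nil cache transparently falls back to direct lookups. The pattern, reduced to a self-contained runnable sketch (the `cache`/`get` names are illustrative; only the memoize-or-fall-back shape comes from the code above):

```go
package main

import "fmt"

// get resolves a value through an optional single-slot cache: a nil cache
// always performs the lookup; a non-nil cache memoizes the first non-nil
// result, exactly like getBucket in bucket_cache.go.
type cache struct {
	value *string
}

func get(c *cache, lookup func() *string) *string {
	if c == nil {
		return lookup() // no cache: always hit the source
	}
	if c.value == nil {
		c.value = lookup() // memoize on first use
	}
	return c.value
}

func main() {
	calls := 0
	lookup := func() *string {
		calls++
		s := "bucket"
		return &s
	}

	c := &cache{}
	get(c, lookup)
	get(c, lookup)
	fmt.Println(calls) // 1: the second call is served from the cache

	get(nil, lookup)
	get(nil, lookup)
	fmt.Println(calls) // 3: a nil cache performs the lookup every time
}
```

-// GetChildren returns parent -> children map.
-// If an object has no children, the map will contain an addr -> empty slice value.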
-func (db *DB) GetChildren(ctx context.Context, addresses []oid.Address) (map[oid.Address][]oid.Address, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("GetChildren", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.GetChildren", - trace.WithAttributes( - attribute.Int("addr_count", len(addresses)), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - result := make(map[oid.Address][]oid.Address, len(addresses)) - - buffer := make([]byte, bucketKeySize) - err := db.boltDB.View(func(tx *bbolt.Tx) error { - for _, addr := range addresses { - if _, found := result[addr]; found { - continue - } - - result[addr] = []oid.Address{} - bkt := tx.Bucket(parentBucketName(addr.Container(), buffer)) - if bkt == nil { - continue - } - - binObjIDs, err := decodeList(bkt.Get(objectKey(addr.Object(), buffer))) - if err != nil { - return err - } - - for _, binObjID := range binObjIDs { - var id oid.ID - if err = id.Decode(binObjID); err != nil { - return err - } - var resultAddress oid.Address - resultAddress.SetContainer(addr.Container()) - resultAddress.SetObject(id) - result[addr] = append(result[addr], resultAddress) - } - } - return nil - }) - if err != nil { - return nil, metaerr.Wrap(err) - } - success = true - return result, nil -} diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go deleted file mode 100644 index da27e6085..000000000 --- a/pkg/local_object_storage/metabase/containers.go +++ /dev/null @@ -1,122 +0,0 @@ -package meta - -import ( - "context" - "encoding/binary" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.etcd.io/bbolt" -) - -func (db *DB) Containers(ctx context.Context) (list []cid.ID, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("Containers", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.Containers") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - err = db.boltDB.View(func(tx *bbolt.Tx) error { - list, err = db.containers(tx) - - return err - }) - success = err == nil - return list, metaerr.Wrap(err) -} - -func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) { - result := make([]cid.ID, 0) - unique := make(map[string]struct{}) - var cnr cid.ID - - err := tx.ForEach(func(name []byte, _ *bbolt.Bucket) error { - if parseContainerID(&cnr, name, unique) { - result = append(result, cnr) - unique[string(name[1:bucketKeySize])] = struct{}{} - } - - return nil - }) - - return result, err -} - -func (db *DB) ContainerSize(id cid.ID) (uint64, error) { - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return 0, ErrDegradedMode - } - - var size uint64 - err := db.boltDB.View(func(tx *bbolt.Tx) error { - size = db.containerSize(tx, id) - - return nil - }) - - return size, metaerr.Wrap(err) -} - -func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 { - containerVolume := tx.Bucket(containerVolumeBucketName) - key := make([]byte, cidSize) - id.Encode(key) - - return 
parseContainerSize(containerVolume.Get(key)) -} - -func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool { - if len(name) != bucketKeySize { - return false - } - if _, ok := ignore[string(name[1:bucketKeySize])]; ok { - return false - } - return dst.Decode(name[1:bucketKeySize]) == nil -} - -func parseContainerSize(v []byte) uint64 { - if len(v) == 0 { - return 0 - } - - return binary.LittleEndian.Uint64(v) -} - -func changeContainerSize(tx *bbolt.Tx, id cid.ID, delta uint64, increase bool) error { - containerVolume := tx.Bucket(containerVolumeBucketName) - key := make([]byte, cidSize) - id.Encode(key) - - size := parseContainerSize(containerVolume.Get(key)) - - if increase { - size += delta - } else if size > delta { - size -= delta - } else { - size = 0 - } - - buf := make([]byte, 8) // consider using sync.Pool to decrease allocations - binary.LittleEndian.PutUint64(buf, size) - - return containerVolume.Put(key, buf) -} diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go deleted file mode 100644 index 8d8d91dc7..000000000 --- a/pkg/local_object_storage/metabase/containers_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package meta_test - -import ( - "context" - "math/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDB_Containers(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const N = 10 - - cids := make(map[string]int, N) - - for range N { - obj := testutil.GenerateObject() - - cnr, _ := obj.ContainerID() - - cids[cnr.EncodeToString()] = 0 - - err := putBig(db, obj) - require.NoError(t, err) - } - - lst, err := db.Containers(context.Background()) - require.NoError(t, err) - - for _, cnr := range lst { - i, ok := cids[cnr.EncodeToString()] - require.True(t, ok) - require.Equal(t, 0, i) - - cids[cnr.EncodeToString()] = 1 - } - - // require.Contains not working since cnrs is a ptr slice - assertContains := func(cnrs []cid.ID, cnr cid.ID) { - found := false - for i := 0; !found && i < len(cnrs); i++ { - found = cnrs[i].Equals(cnr) - } - - require.True(t, found) - } - - t.Run("Inhume", func(t *testing.T) { - obj := testutil.GenerateObject() - - require.NoError(t, putBig(db, obj)) - - cnrs, err := db.Containers(context.Background()) - require.NoError(t, err) - cnr, _ := obj.ContainerID() - - assertContains(cnrs, cnr) - - require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID())) - - cnrs, err = db.Containers(context.Background()) - require.NoError(t, err) - assertContains(cnrs, cnr) - }) -} - -func TestDB_ContainersCount(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const R, T, SG, L = 10, 11, 12, 13 // amount of object per type - - uploadObjects := [...]struct { - amount int - typ objectSDK.Type - }{ - {R, objectSDK.TypeRegular}, - {T, objectSDK.TypeTombstone}, - {L, objectSDK.TypeLock}, - } - - expected := make([]cid.ID, 0, R+T+SG+L) - - for _, upload := range uploadObjects { - for 
range upload.amount { - obj := testutil.GenerateObject() - obj.SetType(upload.typ) - - err := putBig(db, obj) - require.NoError(t, err) - - cnr, _ := obj.ContainerID() - expected = append(expected, cnr) - } - } - - got, err := db.Containers(context.Background()) - require.NoError(t, err) - require.ElementsMatch(t, expected, got) -} - -func TestDB_ContainerSize(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const ( - C = 3 - N = 5 - ) - - cids := make(map[cid.ID]int, C) - objs := make(map[cid.ID][]*objectSDK.Object, C*N) - - for range C { - cnr := cidtest.ID() - cids[cnr] = 0 - - for range N { - size := rand.Intn(1024) - - parent := testutil.GenerateObjectWithCID(cnr) - parent.SetPayloadSize(uint64(size / 2)) - - obj := testutil.GenerateObjectWithCID(cnr) - obj.SetPayloadSize(uint64(size)) - idParent, _ := parent.ID() - obj.SetParentID(idParent) - obj.SetParent(parent) - - cids[cnr] += size - objs[cnr] = append(objs[cnr], obj) - - err := putBig(db, obj) - require.NoError(t, err) - } - } - - for cnr, volume := range cids { - n, err := db.ContainerSize(cnr) - require.NoError(t, err) - require.Equal(t, volume, int(n)) - } - - t.Run("Inhume", func(t *testing.T) { - for cnr, list := range objs { - volume := cids[cnr] - - for _, obj := range list { - require.NoError(t, metaInhume( - db, - object.AddressOf(obj), - oidtest.ID(), - )) - - volume -= int(obj.PayloadSize()) - - n, err := db.ContainerSize(cnr) - require.NoError(t, err) - require.Equal(t, volume, int(n)) - } - } - }) -} diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go deleted file mode 100644 index c19c65224..000000000 --- a/pkg/local_object_storage/metabase/control.go +++ /dev/null @@ -1,256 +0,0 @@ -package meta - -import ( - "context" - "errors" - "fmt" - "path/filepath" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -// ErrDegradedMode is returned when metabase is in a degraded mode. -var ErrDegradedMode = logicerr.New("metabase is in a degraded mode") - -// ErrReadOnlyMode is returned when metabase is in a read-only mode. -var ErrReadOnlyMode = logicerr.New("metabase is in a read-only mode") - -var ( - mStaticBuckets = map[string]struct{}{ - string(containerVolumeBucketName): {}, - string(containerCounterBucketName): {}, - string(graveyardBucketName): {}, - string(garbageBucketName): {}, - string(shardInfoBucket): {}, - string(bucketNameLocked): {}, - string(expEpochToObjectBucketName): {}, - } - - // deprecatedBuckets buckets that are not used anymore. - deprecatedBuckets = [][]byte{ - toMoveItBucketName, - } -) - -// Open boltDB instance for metabase. 
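An aside before the metabase control code below: `changeContainerSize` and `parseContainerSize` above maintain one 8-byte little-endian uint64 record per container, with decrements saturating at zero. A self-contained sketch of the record format (`encodeContainerSize` is an illustrative helper; `parseContainerSize` is copied from the code above):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseContainerSize mirrors containers.go above: an absent record reads as
// zero, otherwise the value is an 8-byte little-endian uint64.
func parseContainerSize(v []byte) uint64 {
	if len(v) == 0 {
		return 0
	}
	return binary.LittleEndian.Uint64(v)
}

// encodeContainerSize builds the value stored in the container volume bucket.
func encodeContainerSize(size uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, size)
	return buf
}

func main() {
	rec := encodeContainerSize(1 << 20)  // container holds 1 MiB of payload
	fmt.Println(parseContainerSize(rec)) // 1048576
	// As in changeContainerSize, a decrement larger than the stored size
	// clamps the result to zero rather than wrapping around.
}
```
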
-func (db *DB) Open(ctx context.Context, m mode.Mode) error { - db.modeMtx.Lock() - defer db.modeMtx.Unlock() - db.mode = m - db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) - - if m.NoMetabase() { - return nil - } - return db.openDB(ctx, m) -} - -func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { - err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission) - if err != nil { - return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err) - } - - db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) - - if db.boltOptions == nil { - opts := *bbolt.DefaultOptions - db.boltOptions = &opts - } - db.boltOptions.ReadOnly = mode.ReadOnly() - - return metaerr.Wrap(db.openBolt(ctx)) -} - -func (db *DB) openBolt(ctx context.Context) error { - var err error - - db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions) - if err != nil { - return fmt.Errorf("open boltDB database: %w", err) - } - db.boltDB.MaxBatchDelay = db.boltBatchDelay - db.boltDB.MaxBatchSize = db.boltBatchSize - - db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase) - - db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion) - return db.boltDB.View(func(tx *bbolt.Tx) error { - // The safest way to check if the metabase is fresh is to check if it has no buckets. - // However, shard info can be present. So here we check that the number of buckets is - // at most 1. - // Another thing to consider is that tests do not persist shard ID, we want to support - // this case too. - var n int - err := tx.ForEach(func([]byte, *bbolt.Bucket) error { - if n++; n >= 2 { // do not iterate a lot - return errBreakBucketForEach - } - return nil - }) - - if err == errBreakBucketForEach { - db.initialized = true - err = nil - } - return err - }) -} - -// Init initializes metabase. It creates static (CID-independent) buckets in underlying BoltDB instance. -// -// Returns ErrOutdatedVersion if a database at the provided path is outdated. -// -// Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state, -// use Reset. -func (db *DB) Init(_ context.Context) error { - return metaerr.Wrap(db.init(false)) -} - -// Reset resets metabase. Works similar to Init but cleans up all static buckets and -// removes all dynamic (CID-dependent) ones in non-blank BoltDB instances. -func (db *DB) Reset() error { - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - return metaerr.Wrap(db.init(true)) -} - -func (db *DB) init(reset bool) error { - if db.mode.NoMetabase() || db.mode.ReadOnly() { - return nil - } - - return db.boltDB.Update(func(tx *bbolt.Tx) error { - var err error - if !reset { - // Normal open, check version and update if not initialized. 
- err := checkVersion(tx, db.initialized)
- if err != nil {
- return err
- }
- }
- for k := range mStaticBuckets {
- name := []byte(k)
- if reset {
- err := tx.DeleteBucket(name)
- if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete static bucket %s: %w", k, err)
- }
- }
-
- _, err := tx.CreateBucketIfNotExists(name)
- if err != nil {
- return fmt.Errorf("create static bucket %s: %w", k, err)
- }
- }
-
- for _, b := range deprecatedBuckets {
- err := tx.DeleteBucket(b)
- if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
- }
- }
-
- if !reset { // counters will be recalculated by refill metabase
- err = syncCounter(tx, false)
- if err != nil {
- return fmt.Errorf("sync object counter: %w", err)
- }
-
- return nil
- }
-
- bucketCursor := tx.Cursor()
- name, _ := bucketCursor.First()
- for name != nil {
- if _, ok := mStaticBuckets[string(name)]; !ok {
- if err := tx.DeleteBucket(name); err != nil {
- return err
- }
- name, _ = bucketCursor.Seek(name)
- continue
- }
- name, _ = bucketCursor.Next()
- }
- return updateVersion(tx, version)
- })
-}
-
-// SyncCounters forces synchronization of the object counters.
-func (db *DB) SyncCounters() error {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- } else if db.mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
- return syncCounter(tx, true)
- }))
-}
-
-// Close closes boltDB instance
-// and reports metabase metric.
-func (db *DB) Close(context.Context) error {
- var err error
- if db.boltDB != nil {
- err = db.close()
- }
- if err == nil {
- db.metrics.Close()
- }
- return err
-}
-
-func (db *DB) close() error {
- return metaerr.Wrap(db.boltDB.Close())
-}
-
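Taken together, Open, Init and Close above define the metabase lifecycle. A minimal sketch of the expected call sequence; note that `meta.New` and `meta.WithPath` are assumptions here (the constructor and its options are not part of this excerpt), while `Open`, `Init`, `Close` and `ErrOutdatedVersion` are documented above:

```go
package main

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)

func main() {
	ctx := context.Background()

	// meta.New and meta.WithPath are assumed; the constructor lives in db.go,
	// outside the code shown above.
	db := meta.New(meta.WithPath("/path/to/meta.db"))

	// Open in read-write, then Init: creates the static buckets and checks
	// the on-disk version.
	if err := db.Open(ctx, mode.ReadWrite); err != nil {
		panic(err)
	}
	if err := db.Init(ctx); err != nil {
		panic(err) // e.g. ErrOutdatedVersion for a database that needs an upgrade
	}
	defer func() { _ = db.Close(ctx) }()

	// SetMode and Reload may be called later in response to config changes.
}
```

-// Reload reloads part of the configuration.
-// It returns true iff the database was reopened.
-// If a config option is invalid, it logs an error and returns nil.
-// If there was a problem with applying new configuration, an error is returned.
-//
-// If the metabase could not be reopened because of an error, ErrDegradedMode is returned.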
-func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) { - var c cfg - for i := range opts { - opts[i](&c) - } - - db.modeMtx.Lock() - defer db.modeMtx.Unlock() - - if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) { - if err := db.Close(ctx); err != nil { - return false, err - } - - db.mode = mode.Disabled - db.metrics.SetMode(mode.ComponentDisabled) - db.info.Path = c.info.Path - if err := db.openBolt(ctx); err != nil { - return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err)) - } - - db.mode = mode.ReadWrite - db.metrics.SetMode(mode.ComponentReadWrite) - return true, nil - } - - return false, nil -} diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go deleted file mode 100644 index d26402675..000000000 --- a/pkg/local_object_storage/metabase/control_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestReset(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - err := db.Reset() - require.NoError(t, err) - - obj := testutil.GenerateObject() - addr := object.AddressOf(obj) - - addrToInhume := oidtest.Address() - - assertExists := func(addr oid.Address, expExists bool, assertErr func(error) bool) { - exists, err := metaExists(db, addr) - if assertErr != nil { - require.True(t, assertErr(err)) - } else { - require.NoError(t, err) - } - require.Equal(t, expExists, exists) - } - - assertExists(addr, false, nil) - assertExists(addrToInhume, false, nil) - - err = putBig(db, obj) - require.NoError(t, err) - - err = metaInhume(db, addrToInhume, oidtest.ID()) - require.NoError(t, err) - - assertExists(addr, true, nil) - assertExists(addrToInhume, false, client.IsErrObjectAlreadyRemoved) - - err = db.Reset() - require.NoError(t, err) - - assertExists(addr, false, nil) - assertExists(addr, false, nil) -} - -func metaExists(db *meta.DB, addr oid.Address) (bool, error) { - var existsPrm meta.ExistsPrm - existsPrm.SetAddress(addr) - - res, err := db.Exists(context.Background(), existsPrm) - return res.Exists(), err -} diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go deleted file mode 100644 index 732f99519..000000000 --- a/pkg/local_object_storage/metabase/counter.go +++ /dev/null @@ -1,749 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var ( - objectPhyCounterKey = []byte("phy_counter") - objectLogicCounterKey = 
[]byte("logic_counter") - objectUserCounterKey = []byte("user_counter") -) - -var ( - errInvalidKeyLenght = errors.New("invalid key length") - errInvalidValueLenght = errors.New("invalid value length") -) - -type objectType uint8 - -const ( - _ objectType = iota - phy - logical - user -) - -// ObjectCounters groups object counter -// according to metabase state. -type ObjectCounters struct { - Logic uint64 - Phy uint64 - User uint64 -} - -func (o ObjectCounters) IsZero() bool { - return o.Phy == 0 && o.Logic == 0 && o.User == 0 -} - -// ObjectCounters returns object counters that metabase has -// tracked since it was opened and initialized. -// -// Returns only the errors that do not allow reading counter -// in Bolt database. -func (db *DB) ObjectCounters() (cc ObjectCounters, err error) { - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ObjectCounters{}, ErrDegradedMode - } - - err = db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(shardInfoBucket) - if b != nil { - data := b.Get(objectPhyCounterKey) - if len(data) == 8 { - cc.Phy = binary.LittleEndian.Uint64(data) - } - - data = b.Get(objectLogicCounterKey) - if len(data) == 8 { - cc.Logic = binary.LittleEndian.Uint64(data) - } - - data = b.Get(objectUserCounterKey) - if len(data) == 8 { - cc.User = binary.LittleEndian.Uint64(data) - } - } - - return nil - }) - - return cc, metaerr.Wrap(err) -} - -type ContainerCounters struct { - Counts map[cid.ID]ObjectCounters -} - -// ContainerCounters returns object counters for each container -// that metabase has tracked since it was opened and initialized. -// -// Returns only the errors that do not allow reading counter -// in Bolt database. -// -// It is guaranteed that the ContainerCounters fields are not nil. 
-func (db *DB) ContainerCounters(ctx context.Context) (ContainerCounters, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("ContainerCounters", time.Since(startedAt), success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCounters") - defer span.End() - - cc := ContainerCounters{ - Counts: make(map[cid.ID]ObjectCounters), - } - - lastKey := make([]byte, cidSize) - - // there is no limit for containers count, so use batching with cancellation - for { - select { - case <-ctx.Done(): - return cc, ctx.Err() - default: - } - - completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) { - cc.Counts[id] = entity - }) - if err != nil { - return cc, err - } - if completed { - break - } - } - - success = true - return cc, nil -} - -func (db *DB) containerCountersNextBatch(lastKey []byte, f func(id cid.ID, entity ObjectCounters)) (bool, error) { - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return false, ErrDegradedMode - } - - counter := 0 - const batchSize = 1000 - - err := db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(containerCounterBucketName) - if b == nil { - return ErrInterruptIterator - } - c := b.Cursor() - var key, value []byte - for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() { - if bytes.Equal(lastKey, key) { - continue - } - copy(lastKey, key) - - cnrID, err := parseContainerCounterKey(key) - if err != nil { - return err - } - ent, err := parseContainerCounterValue(value) - if err != nil { - return err - } - f(cnrID, ent) - - counter++ - if counter == batchSize { - break - } - } - - if counter < batchSize { // last batch - return ErrInterruptIterator - } - return nil - }) - if err != nil { - if errors.Is(err, ErrInterruptIterator) { - return true, nil - } - return false, metaerr.Wrap(err) - } - return false, nil -} - -func (db *DB) ContainerCount(ctx context.Context, id cid.ID) (ObjectCounters, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("ContainerCount", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCount") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ObjectCounters{}, ErrDegradedMode - } - - var result ObjectCounters - - err := db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(containerCounterBucketName) - key := make([]byte, cidSize) - id.Encode(key) - v := b.Get(key) - if v == nil { - return nil - } - var err error - result, err = parseContainerCounterValue(v) - return err - }) - - return result, metaerr.Wrap(err) -} - -func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { - b := tx.Bucket(shardInfoBucket) - if b == nil { - return db.incContainerObjectCounter(tx, cnrID, isUserObject) - } - - if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil { - return fmt.Errorf("increase phy object counter: %w", err) - } - if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil { - return fmt.Errorf("increase logical object counter: %w", err) - } - if isUserObject { - if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil { - return fmt.Errorf("increase user object counter: %w", err) - } - } - return db.incContainerObjectCounter(tx, cnrID, isUserObject) -} - -func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ 
objectType, delta uint64) error { - b := tx.Bucket(shardInfoBucket) - if b == nil { - return nil - } - - return db.updateShardObjectCounterBucket(b, typ, delta, false) -} - -func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error { - var counter uint64 - var counterKey []byte - - switch typ { - case phy: - counterKey = objectPhyCounterKey - case logical: - counterKey = objectLogicCounterKey - case user: - counterKey = objectUserCounterKey - default: - panic("unknown object type counter") - } - - data := b.Get(counterKey) - if len(data) == 8 { - counter = binary.LittleEndian.Uint64(data) - } - - if inc { - counter += delta - } else if counter <= delta { - counter = 0 - } else { - counter -= delta - } - - newCounter := make([]byte, 8) - binary.LittleEndian.PutUint64(newCounter, counter) - - return b.Put(counterKey, newCounter) -} - -func (db *DB) updateContainerCounter(tx *bbolt.Tx, delta map[cid.ID]ObjectCounters, inc bool) error { - b := tx.Bucket(containerCounterBucketName) - if b == nil { - return nil - } - - key := make([]byte, cidSize) - for cnrID, cnrDelta := range delta { - cnrID.Encode(key) - if err := db.editContainerCounterValue(b, key, cnrDelta, inc); err != nil { - return err - } - } - return nil -} - -func (*DB) editContainerCounterValue(b *bbolt.Bucket, key []byte, delta ObjectCounters, inc bool) error { - var entity ObjectCounters - var err error - data := b.Get(key) - if len(data) > 0 { - entity, err = parseContainerCounterValue(data) - if err != nil { - return err - } - } - entity.Phy = nextValue(entity.Phy, delta.Phy, inc) - entity.Logic = nextValue(entity.Logic, delta.Logic, inc) - entity.User = nextValue(entity.User, delta.User, inc) - value := containerCounterValue(entity) - return b.Put(key, value) -} - -func nextValue(existed, delta uint64, inc bool) uint64 { - if inc { - existed += delta - } else if existed <= delta { - existed = 0 - } else { - existed -= delta - } - return existed -} - -func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { - b := tx.Bucket(containerCounterBucketName) - if b == nil { - return nil - } - - key := make([]byte, cidSize) - cnrID.Encode(key) - c := ObjectCounters{Logic: 1, Phy: 1} - if isUserObject { - c.User = 1 - } - return db.editContainerCounterValue(b, key, c, true) -} - -// syncCounter updates object counters according to metabase state: -// it counts all the physically/logically stored objects using internal -// indexes. Tx MUST be writable. -// -// Does nothing if counters are not empty and force is false. If force is -// true, updates the counters anyway. 
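An aside before `syncCounter`: every counter update in this file goes through the same saturating arithmetic, so a decrement can never wrap a uint64 around. `nextValue` below is copied verbatim from counter.go above, with a small runnable demonstration:

```go
package main

import "fmt"

// nextValue is the saturating counter update from counter.go: increments add,
// decrements clamp at zero instead of underflowing.
func nextValue(existed, delta uint64, inc bool) uint64 {
	if inc {
		existed += delta
	} else if existed <= delta {
		existed = 0
	} else {
		existed -= delta
	}
	return existed
}

func main() {
	fmt.Println(nextValue(5, 3, true))  // 8
	fmt.Println(nextValue(5, 3, false)) // 2
	fmt.Println(nextValue(2, 3, false)) // 0: saturates, no uint64 wraparound
}
```
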
-func syncCounter(tx *bbolt.Tx, force bool) error { - shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket) - if err != nil { - return fmt.Errorf("get shard info bucket: %w", err) - } - shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 && - len(shardInfoB.Get(objectLogicCounterKey)) == 8 && - len(shardInfoB.Get(objectUserCounterKey)) == 8 - containerObjectCounterInitialized := containerObjectCounterInitialized(tx) - if !force && shardObjectCounterInitialized && containerObjectCounterInitialized { - // the counters are already inited - return nil - } - - containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName) - if err != nil { - return fmt.Errorf("get container counter bucket: %w", err) - } - - var addr oid.Address - counters := make(map[cid.ID]ObjectCounters) - - graveyardBKT := tx.Bucket(graveyardBucketName) - garbageBKT := tx.Bucket(garbageBucketName) - key := make([]byte, addressKeySize) - var isAvailable bool - - err = iteratePhyObjects(tx, func(cnr cid.ID, objID oid.ID, obj *objectSDK.Object) error { - if v, ok := counters[cnr]; ok { - v.Phy++ - counters[cnr] = v - } else { - counters[cnr] = ObjectCounters{ - Phy: 1, - } - } - - addr.SetContainer(cnr) - addr.SetObject(objID) - isAvailable = false - - // check if an object is available: not with GCMark - // and not covered with a tombstone - if inGraveyardWithKey(addressKey(addr, key), graveyardBKT, garbageBKT) == 0 { - if v, ok := counters[cnr]; ok { - v.Logic++ - counters[cnr] = v - } else { - counters[cnr] = ObjectCounters{ - Logic: 1, - } - } - isAvailable = true - } - - if isAvailable && IsUserObject(obj) { - if v, ok := counters[cnr]; ok { - v.User++ - counters[cnr] = v - } else { - counters[cnr] = ObjectCounters{ - User: 1, - } - } - } - - return nil - }) - if err != nil { - return fmt.Errorf("iterate objects: %w", err) - } - - return setObjectCounters(counters, shardInfoB, containerCounterB) -} - -func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, containerCounterB *bbolt.Bucket) error { - var phyTotal uint64 - var logicTotal uint64 - var userTotal uint64 - key := make([]byte, cidSize) - for cnrID, count := range counters { - phyTotal += count.Phy - logicTotal += count.Logic - userTotal += count.User - - cnrID.Encode(key) - value := containerCounterValue(count) - err := containerCounterB.Put(key, value) - if err != nil { - return fmt.Errorf("update phy container object counter: %w", err) - } - } - phyData := make([]byte, 8) - binary.LittleEndian.PutUint64(phyData, phyTotal) - - err := shardInfoB.Put(objectPhyCounterKey, phyData) - if err != nil { - return fmt.Errorf("update phy object counter: %w", err) - } - - logData := make([]byte, 8) - binary.LittleEndian.PutUint64(logData, logicTotal) - - err = shardInfoB.Put(objectLogicCounterKey, logData) - if err != nil { - return fmt.Errorf("update logic object counter: %w", err) - } - - userData := make([]byte, 8) - binary.LittleEndian.PutUint64(userData, userTotal) - - err = shardInfoB.Put(objectUserCounterKey, userData) - if err != nil { - return fmt.Errorf("update user object counter: %w", err) - } - - return nil -} - -func containerCounterValue(entity ObjectCounters) []byte { - res := make([]byte, 24) - binary.LittleEndian.PutUint64(res, entity.Phy) - binary.LittleEndian.PutUint64(res[8:], entity.Logic) - binary.LittleEndian.PutUint64(res[16:], entity.User) - return res -} - -func parseContainerCounterKey(buf []byte) (cid.ID, error) { - if len(buf) != cidSize { - return cid.ID{}, 
errInvalidKeyLenght
-}
- var cnrID cid.ID
- if err := cnrID.Decode(buf); err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
- }
- return cnrID, nil
-}
-
-// parseContainerCounterValue returns phy, logic and user values.
-func parseContainerCounterValue(buf []byte) (ObjectCounters, error) {
- if len(buf) != 24 {
- return ObjectCounters{}, errInvalidValueLenght
- }
- return ObjectCounters{
- Phy: binary.LittleEndian.Uint64(buf),
- Logic: binary.LittleEndian.Uint64(buf[8:16]),
- User: binary.LittleEndian.Uint64(buf[16:]),
- }, nil
-}
-
-func containerObjectCounterInitialized(tx *bbolt.Tx) bool {
- b := tx.Bucket(containerCounterBucketName)
- if b == nil {
- return false
- }
- k, v := b.Cursor().First()
- if k == nil && v == nil {
- return true
- }
- _, err := parseContainerCounterKey(k)
- if err != nil {
- return false
- }
- _, err = parseContainerCounterValue(v)
- return err == nil
-}
-
-func IsUserObject(obj *objectSDK.Object) bool {
- ech := obj.ECHeader()
- if ech == nil {
- _, hasParentID := obj.ParentID()
- return obj.Type() == objectSDK.TypeRegular &&
- (obj.SplitID() == nil ||
- (hasParentID && len(obj.Children()) == 0))
- }
- return ech.Index() == 0 && (ech.ParentSplitID() == nil || ech.ParentSplitParentID() != nil)
-}
-
-// ZeroSizeContainers returns containers with size = 0.
-func (db *DB) ZeroSizeContainers(ctx context.Context) ([]cid.ID, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("ZeroSizeContainers", time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroSizeContainers")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- var result []cid.ID
- lastKey := make([]byte, cidSize)
-
- for {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- completed, err := db.containerSizesNextBatch(lastKey, func(contID cid.ID, size uint64) {
- if size == 0 {
- result = append(result, contID)
- }
- })
- if err != nil {
- return nil, err
- }
- if completed {
- break
- }
- }
-
- success = true
- return result, nil
-}
-
-func (db *DB) containerSizesNextBatch(lastKey []byte, f func(cid.ID, uint64)) (bool, error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return false, ErrDegradedMode
- }
-
- counter := 0
- const batchSize = 1000
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerVolumeBucketName)
- c := b.Cursor()
- var key, value []byte
- for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() {
- if bytes.Equal(lastKey, key) {
- continue
- }
- copy(lastKey, key)
-
- size := parseContainerSize(value)
- var id cid.ID
- if err := id.Decode(key); err != nil {
- return err
- }
- f(id, size)
-
- counter++
- if counter == batchSize {
- break
- }
- }
-
- if counter < batchSize {
- return ErrInterruptIterator
- }
- return nil
- })
- if err != nil {
- if errors.Is(err, ErrInterruptIterator) {
- return true, nil
- }
- return false, metaerr.Wrap(err)
- }
- return false, nil
-}
-
-func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("DeleteContainerSize", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerSize",
- trace.WithAttributes(
- attribute.Stringer("container_id", id),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if
db.mode.NoMetabase() { - return ErrDegradedMode - } - - if db.mode.ReadOnly() { - return ErrReadOnlyMode - } - - err := db.boltDB.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(containerVolumeBucketName) - - key := make([]byte, cidSize) - id.Encode(key) - return b.Delete(key) - }) - success = err == nil - return metaerr.Wrap(err) -} - -// ZeroCountContainers returns containers with objects count = 0 in metabase. -func (db *DB) ZeroCountContainers(ctx context.Context) ([]cid.ID, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("ZeroCountContainers", time.Since(startedAt), success) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroCountContainers") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - var result []cid.ID - - lastKey := make([]byte, cidSize) - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) { - if entity.IsZero() { - result = append(result, id) - } - }) - if err != nil { - return nil, metaerr.Wrap(err) - } - if completed { - break - } - } - success = true - return result, nil -} - -func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("DeleteContainerCount", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerCount", - trace.WithAttributes( - attribute.Stringer("container_id", id), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - if db.mode.ReadOnly() { - return ErrReadOnlyMode - } - - err := db.boltDB.Batch(func(tx *bbolt.Tx) error { - b := tx.Bucket(containerCounterBucketName) - - key := make([]byte, cidSize) - id.Encode(key) - return b.Delete(key) - }) - success = err == nil - return metaerr.Wrap(err) -} diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go deleted file mode 100644 index 950385a29..000000000 --- a/pkg/local_object_storage/metabase/counter_test.go +++ /dev/null @@ -1,569 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -const objCount = 10 - -func TestCounters(t *testing.T) { - t.Parallel() - - t.Run("defaults", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - c, err := db.ObjectCounters() - require.NoError(t, err) - require.Zero(t, c.Phy) - require.Zero(t, c.Logic) - require.Zero(t, c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Zero(t, len(cc.Counts)) - }) - - t.Run("put", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer 
func() { require.NoError(t, db.Close(context.Background())) }() - oo := make([]*objectSDK.Object, 0, objCount) - for range objCount { - oo = append(oo, testutil.GenerateObject()) - } - - var prm meta.PutPrm - exp := make(map[cid.ID]meta.ObjectCounters) - - for i := range objCount { - prm.SetObject(oo[i]) - cnrID, _ := oo[i].ContainerID() - c := meta.ObjectCounters{} - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - - c, err = db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(i+1), c.Phy) - require.Equal(t, uint64(i+1), c.Logic) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - } - }) - - t.Run("delete", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - oo := putObjs(t, db, objCount, false) - - exp := make(map[cid.ID]meta.ObjectCounters) - for _, obj := range oo { - cnrID, _ := obj.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - var prm meta.DeletePrm - for i := objCount - 1; i >= 0; i-- { - prm.SetAddresses(objectcore.AddressOf(oo[i])) - - res, err := db.Delete(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(1), res.LogicCount()) - - c, err := db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(i), c.Phy) - require.Equal(t, uint64(i), c.Logic) - require.Equal(t, uint64(i), c.User) - - cnrID, _ := oo[i].ContainerID() - if v, ok := exp[cnrID]; ok { - v.Phy-- - v.Logic-- - v.User-- - exp[cnrID] = v - } - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - } - }) - - t.Run("inhume", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - oo := putObjs(t, db, objCount, false) - - exp := make(map[cid.ID]meta.ObjectCounters) - for _, obj := range oo { - cnrID, _ := obj.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - inhumedObjs := make([]oid.Address, objCount/2) - - for i, o := range oo { - if i == len(inhumedObjs) { - break - } - - inhumedObjs[i] = objectcore.AddressOf(o) - } - - for _, addr := range inhumedObjs { - if v, ok := exp[addr.Container()]; ok { - v.Logic-- - v.User-- - if v.IsZero() { - delete(exp, addr.Container()) - } else { - exp[addr.Container()] = v - } - } - } - - var prm meta.InhumePrm - for _, o := range inhumedObjs { - tombAddr := oidtest.Address() - tombAddr.SetContainer(o.Container()) - - prm.SetTombstoneAddress(tombAddr) - prm.SetAddresses(o) - - res, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(1), res.LogicInhumed()) - require.Equal(t, uint64(1), res.UserInhumed()) - } - - c, err := db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(objCount), c.Phy) - require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic) - require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - }) - - t.Run("put_split", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - parObj := 
testutil.GenerateObject() - - exp := make(map[cid.ID]meta.ObjectCounters) - - // put objects and check that parent info - // does not affect the counter - for i := range objCount { - o := testutil.GenerateObject() - if i < objCount/2 { // half of the objs will have the parent - o.SetParent(parObj) - o.SetSplitID(objectSDK.NewSplitID()) - } - - cnrID, _ := o.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - - require.NoError(t, putBig(db, o)) - - c, err := db.ObjectCounters() - require.NoError(t, err) - require.Equal(t, uint64(i+1), c.Phy) - require.Equal(t, uint64(i+1), c.Logic) - require.Equal(t, uint64(i+1), c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - } - }) - - t.Run("delete_split", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - oo := putObjs(t, db, objCount, true) - - exp := make(map[cid.ID]meta.ObjectCounters) - for _, obj := range oo { - cnrID, _ := obj.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - // delete objects that have parent info - // and check that it does not affect - // the counter - for i, o := range oo { - addr := objectcore.AddressOf(o) - require.NoError(t, metaDelete(db, addr)) - - c, err := db.ObjectCounters() - require.NoError(t, err) - require.Equal(t, uint64(objCount-i-1), c.Phy) - require.Equal(t, uint64(objCount-i-1), c.Logic) - require.Equal(t, uint64(objCount-i-1), c.User) - - if v, ok := exp[addr.Container()]; ok { - v.Logic-- - v.Phy-- - v.User-- - if v.IsZero() { - delete(exp, addr.Container()) - } else { - exp[addr.Container()] = v - } - } - } - }) - - t.Run("inhume_split", func(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - oo := putObjs(t, db, objCount, true) - - exp := make(map[cid.ID]meta.ObjectCounters) - for _, obj := range oo { - cnrID, _ := obj.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - inhumedObjs := make([]oid.Address, objCount/2) - - for i, o := range oo { - if i == len(inhumedObjs) { - break - } - - inhumedObjs[i] = objectcore.AddressOf(o) - } - - for _, addr := range inhumedObjs { - if v, ok := exp[addr.Container()]; ok { - v.Logic-- - v.User-- - if v.IsZero() { - delete(exp, addr.Container()) - } else { - exp[addr.Container()] = v - } - } - } - - var prm meta.InhumePrm - for _, o := range inhumedObjs { - tombAddr := oidtest.Address() - tombAddr.SetContainer(o.Container()) - - prm.SetTombstoneAddress(tombAddr) - prm.SetAddresses(o) - - _, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) - } - - c, err := db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(objCount), c.Phy) - require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic) - require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - }) -} - -func TestDoublePut(t *testing.T) { - t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - obj := testutil.GenerateObject() - - exp := make(map[cid.ID]meta.ObjectCounters) - cnrID, _ := obj.ContainerID() - exp[cnrID] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - - var prm meta.PutPrm - 
prm.SetObject(obj) - pr, err := db.Put(context.Background(), prm) - require.NoError(t, err) - require.True(t, pr.Inserted) - - c, err := db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(1), c.Phy) - require.Equal(t, uint64(1), c.Logic) - require.Equal(t, uint64(1), c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - - pr, err = db.Put(context.Background(), prm) - require.NoError(t, err) - require.False(t, pr.Inserted) - - c, err = db.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, uint64(1), c.Phy) - require.Equal(t, uint64(1), c.Logic) - require.Equal(t, uint64(1), c.User) - - cc, err = db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) -} - -func TestCounters_Expired(t *testing.T) { - // That test is about expired objects without - // GCMark yet. Such objects should be treated as - // logically available: decrementing logic counter - // should be done explicitly and only in `Delete` - // and `Inhume` operations, otherwise, it would be - // impossible to maintain logic counter. - - const epoch = 123 - - es := &epochState{epoch} - db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - oo := make([]oid.Address, objCount) - for i := range oo { - oo[i] = putWithExpiration(t, db, objectSDK.TypeRegular, epoch+1) - } - - exp := make(map[cid.ID]meta.ObjectCounters) - for _, addr := range oo { - exp[addr.Container()] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - // 1. objects are available and counters are correct - - c, err := db.ObjectCounters() - require.NoError(t, err) - require.Equal(t, uint64(objCount), c.Phy) - require.Equal(t, uint64(objCount), c.Logic) - require.Equal(t, uint64(objCount), c.User) - - cc, err := db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - - for _, o := range oo { - _, err := metaGet(db, o, true) - require.NoError(t, err) - } - - // 2. objects are expired, not available but logic counter - // is the same - - es.e = epoch + 2 - - c, err = db.ObjectCounters() - require.NoError(t, err) - require.Equal(t, uint64(objCount), c.Phy) - require.Equal(t, uint64(objCount), c.Logic) - require.Equal(t, uint64(objCount), c.User) - - cc, err = db.ContainerCounters(context.Background()) - require.NoError(t, err) - - require.Equal(t, meta.ContainerCounters{Counts: exp}, cc) - - for _, o := range oo { - _, err := metaGet(db, o, true) - require.ErrorIs(t, err, meta.ErrObjectIsExpired) - } - - // 3. 
inhuming an expired object with GCMark (as the GC
-	// would do) should decrease the logic counter despite the
-	// expiration fact
-
-	var inhumePrm meta.InhumePrm
-	inhumePrm.SetGCMark()
-	inhumePrm.SetAddresses(oo[0])
-
-	inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
-	require.NoError(t, err)
-	require.Equal(t, uint64(1), inhumeRes.LogicInhumed())
-	require.Equal(t, uint64(1), inhumeRes.UserInhumed())
-
-	c, err = db.ObjectCounters()
-	require.NoError(t, err)
-
-	require.Equal(t, uint64(len(oo)), c.Phy)
-	require.Equal(t, uint64(len(oo)-1), c.Logic)
-	require.Equal(t, uint64(len(oo)-1), c.User)
-
-	if v, ok := exp[oo[0].Container()]; ok {
-		v.Logic--
-		v.User--
-		if v.IsZero() {
-			delete(exp, oo[0].Container())
-		} else {
-			exp[oo[0].Container()] = v
-		}
-	}
-
-	cc, err = db.ContainerCounters(context.Background())
-	require.NoError(t, err)
-
-	require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
-
-	// 4. `Delete` on an object with GCMark should decrease the
-	// phy counter but must not affect the logic counter (after
-	// that step they should be equal)
-
-	var deletePrm meta.DeletePrm
-	deletePrm.SetAddresses(oo[0])
-
-	deleteRes, err := db.Delete(context.Background(), deletePrm)
-	require.NoError(t, err)
-	require.Zero(t, deleteRes.LogicCount())
-	require.Zero(t, deleteRes.UserCount())
-
-	if v, ok := exp[oo[0].Container()]; ok {
-		v.Phy--
-		exp[oo[0].Container()] = v
-	}
-
-	oo = oo[1:]
-
-	c, err = db.ObjectCounters()
-	require.NoError(t, err)
-	require.Equal(t, uint64(len(oo)), c.Phy)
-	require.Equal(t, uint64(len(oo)), c.Logic)
-	require.Equal(t, uint64(len(oo)), c.User)
-
-	cc, err = db.ContainerCounters(context.Background())
-	require.NoError(t, err)
-
-	require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
-
-	// 5. `Delete` on an expired object (as the control service
-	// would do) should decrease both counters despite the
-	// expiration fact
-
-	deletePrm.SetAddresses(oo[0])
-
-	deleteRes, err = db.Delete(context.Background(), deletePrm)
-	require.NoError(t, err)
-	require.Equal(t, uint64(1), deleteRes.LogicCount())
-	require.Equal(t, uint64(1), deleteRes.UserCount())
-
-	if v, ok := exp[oo[0].Container()]; ok {
-		v.Phy--
-		v.Logic--
-		v.User--
-		exp[oo[0].Container()] = v
-	}
-
-	oo = oo[1:]
-
-	c, err = db.ObjectCounters()
-	require.NoError(t, err)
-	require.Equal(t, uint64(len(oo)), c.Phy)
-	require.Equal(t, uint64(len(oo)), c.Logic)
-	require.Equal(t, uint64(len(oo)), c.User)
-
-	cc, err = db.ContainerCounters(context.Background())
-	require.NoError(t, err)
-
-	require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
-}
-
-func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK.Object {
-	var prm meta.PutPrm
-	var err error
-	parent := testutil.GenerateObject()
-
-	oo := make([]*objectSDK.Object, 0, count)
-	for i := range count {
-		o := testutil.GenerateObject()
-		if withParent {
-			o.SetParent(parent)
-			o.SetSplitID(objectSDK.NewSplitID())
-		}
-
-		oo = append(oo, o)
-
-		prm.SetObject(o)
-		_, err = db.Put(context.Background(), prm)
-		require.NoError(t, err)
-
-		c, err := db.ObjectCounters()
-		require.NoError(t, err)
-
-		require.Equal(t, uint64(i+1), c.Phy)
-		require.Equal(t, uint64(i+1), c.Logic)
-	}
-
-	return oo
-}
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
deleted file mode 100644
index 4474aa229..000000000
--- a/pkg/local_object_storage/metabase/db.go
+++ /dev/null
@@ -1,366 +0,0 @@
-package meta
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/hex"
"io/fs" - "os" - "strconv" - "strings" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/mr-tron/base58" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -type matcher struct { - matchSlow func(string, []byte, string) bool - matchBucket func(*bbolt.Bucket, string, string, func([]byte, []byte) error) error -} - -// EpochState is an interface that provides access to the -// current epoch number. -type EpochState interface { - // CurrentEpoch must return current epoch height. - CurrentEpoch() uint64 -} - -// DB represents local metabase of storage node. -type DB struct { - *cfg - - modeMtx sync.RWMutex - mode mode.Mode - - matchers map[objectSDK.SearchMatchType]matcher - - boltDB *bbolt.DB - - initialized bool -} - -// Option is an option of DB constructor. -type Option func(*cfg) - -type cfg struct { - boltOptions *bbolt.Options // optional - - boltBatchSize int - boltBatchDelay time.Duration - - info Info - - log *logger.Logger - - epochState EpochState - metrics Metrics -} - -func defaultCfg() *cfg { - return &cfg{ - info: Info{ - Permission: os.ModePerm, // 0777 - }, - boltBatchDelay: bbolt.DefaultMaxBatchDelay, - boltBatchSize: bbolt.DefaultMaxBatchSize, - log: logger.NewLoggerWrapper(zap.L()), - metrics: &noopMetrics{}, - } -} - -// New creates and returns new Metabase instance. -func New(opts ...Option) *DB { - c := defaultCfg() - - for i := range opts { - opts[i](c) - } - - if c.epochState == nil { - panic("metabase: epoch state is not specified") - } - - return &DB{ - cfg: c, - matchers: map[objectSDK.SearchMatchType]matcher{ - objectSDK.MatchUnknown: { - matchSlow: unknownMatcher, - matchBucket: unknownMatcherBucket, - }, - objectSDK.MatchStringEqual: { - matchSlow: stringEqualMatcher, - matchBucket: stringEqualMatcherBucket, - }, - objectSDK.MatchStringNotEqual: { - matchSlow: stringNotEqualMatcher, - matchBucket: stringNotEqualMatcherBucket, - }, - objectSDK.MatchCommonPrefix: { - matchSlow: stringCommonPrefixMatcher, - matchBucket: stringCommonPrefixMatcherBucket, - }, - }, - mode: mode.Disabled, - } -} - -func stringifyValue(key string, objVal []byte) string { - switch key { - default: - return string(objVal) - case v2object.FilterHeaderObjectID, v2object.FilterHeaderContainerID, v2object.FilterHeaderParent: - return base58.Encode(objVal) - case v2object.FilterHeaderPayloadHash, v2object.FilterHeaderHomomorphicHash: - return hex.EncodeToString(objVal) - case v2object.FilterHeaderCreationEpoch, v2object.FilterHeaderPayloadLength: - return strconv.FormatUint(binary.LittleEndian.Uint64(objVal), 10) - } -} - -// fromHexChar converts a hex character into its value and a success flag. -func fromHexChar(c byte) (byte, bool) { - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - - return 0, false -} - -// destringifyValue is the reverse operation for stringify value. -// The last return value returns true if the filter CAN match any value. -// The second return value is true iff prefix is true and the filter value is considered -// a hex-encoded string. In this case only the first (highest) bits of the last byte should be checked. 
-func destringifyValue(key, value string, prefix bool) ([]byte, bool, bool) { - switch key { - default: - return []byte(value), false, true - case v2object.FilterHeaderObjectID, v2object.FilterHeaderContainerID, v2object.FilterHeaderParent: - v, err := base58.Decode(value) - return v, false, err == nil - case v2object.FilterHeaderPayloadHash, v2object.FilterHeaderHomomorphicHash: - v, err := hex.DecodeString(value) - if err != nil { - if !prefix || len(value)%2 == 0 { - return v, false, false - } - // To match the old behaviour we need to process odd length hex strings, such as 'abc' - last, ok := fromHexChar(value[len(value)-1]) - if !ok { - return v, false, false - } - - v := make([]byte, hex.DecodedLen(len(value)-1)+1) - _, err := hex.Decode(v, []byte(value[:len(value)-1])) - if err != nil { - return nil, false, false - } - v[len(v)-1] = last - - return v, true, true - } - return v, false, err == nil - case v2object.FilterHeaderCreationEpoch, v2object.FilterHeaderPayloadLength: - u, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return nil, false, false - } - raw := make([]byte, 8) - binary.LittleEndian.PutUint64(raw, u) - return raw, false, true - } -} - -func stringEqualMatcher(key string, objVal []byte, filterVal string) bool { - return stringifyValue(key, objVal) == filterVal -} - -func stringEqualMatcherBucket(b *bbolt.Bucket, fKey string, fValue string, f func([]byte, []byte) error) error { - // Ignore the second return value because we check for strict equality. - val, _, ok := destringifyValue(fKey, fValue, false) - if !ok { - return nil - } - if data := b.Get(val); data != nil { - return f(val, data) - } - if b.Bucket(val) != nil { - return f(val, nil) - } - return nil -} - -func stringNotEqualMatcher(key string, objVal []byte, filterVal string) bool { - return stringifyValue(key, objVal) != filterVal -} - -func stringNotEqualMatcherBucket(b *bbolt.Bucket, fKey string, fValue string, f func([]byte, []byte) error) error { - // Ignore the second return value because we check for strict inequality. - val, _, ok := destringifyValue(fKey, fValue, false) - return b.ForEach(func(k, v []byte) error { - if !ok || !bytes.Equal(val, k) { - return f(k, v) - } - return nil - }) -} - -func stringCommonPrefixMatcher(key string, objVal []byte, filterVal string) bool { - return strings.HasPrefix(stringifyValue(key, objVal), filterVal) -} - -func stringCommonPrefixMatcherBucket(b *bbolt.Bucket, fKey string, fVal string, f func([]byte, []byte) error) error { - val, checkLast, ok := destringifyValue(fKey, fVal, true) - if !ok { - return nil - } - - prefix := val - if checkLast { - prefix = val[:len(val)-1] - } - - if len(val) == 0 { - // empty common prefix, all the objects - // satisfy that filter - return b.ForEach(f) - } - - c := b.Cursor() - for k, v := c.Seek(val); bytes.HasPrefix(k, prefix); k, v = c.Next() { - if checkLast && (len(k) == len(prefix) || k[len(prefix)]>>4 != val[len(val)-1]) { - // If the last byte doesn't match, this means the prefix does no longer match, - // so we need to break here. - break - } - if err := f(k, v); err != nil { - return err - } - } - return nil -} - -func unknownMatcher(_ string, _ []byte, _ string) bool { - return false -} - -func unknownMatcherBucket(_ *bbolt.Bucket, _ string, _ string, _ func([]byte, []byte) error) error { - return nil -} - -// bucketKeyHelper returns byte representation of val that is used as a key -// in boltDB. Useful for getting filter values from unique and list indexes. 
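-//
-// Illustrative examples (hypothetical filter values):
-//
-//	bucketKeyHelper(v2object.FilterHeaderPayloadHash, "abcd") // hex-decoded: []byte{0xab, 0xcd}
-//	bucketKeyHelper("X-Custom-Attribute", "some-value")       // default case: []byte("some-value")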
-func bucketKeyHelper(hdr string, val string) []byte {
-	switch hdr {
-	case v2object.FilterHeaderParent, v2object.FilterHeaderECParent:
-		v, err := base58.Decode(val)
-		if err != nil {
-			return nil
-		}
-		return v
-	case v2object.FilterHeaderPayloadHash:
-		v, err := hex.DecodeString(val)
-		if err != nil {
-			return nil
-		}
-
-		return v
-	case v2object.FilterHeaderSplitID:
-		s := objectSDK.NewSplitID()
-
-		err := s.Parse(val)
-		if err != nil {
-			return nil
-		}
-
-		return s.ToV2()
-	default:
-		return []byte(val)
-	}
-}
-
-// SetLogger sets the logger. It is called after the shard ID has been
-// generated so that the ID can be used in logs.
-func (db *DB) SetLogger(l *logger.Logger) {
-	db.log = l
-}
-
-// SetParentID sets the parent ID for nested components. It is called after
-// the shard ID has been generated so that the ID can be used in logs.
-func (db *DB) SetParentID(parentID string) {
-	db.metrics.SetParentID(parentID)
-}
-
-// WithLogger returns option to set logger of DB.
-func WithLogger(l *logger.Logger) Option {
-	return func(c *cfg) {
-		c.log = l
-	}
-}
-
-// WithBoltDBOptions returns option to specify BoltDB options.
-func WithBoltDBOptions(opts *bbolt.Options) Option {
-	return func(c *cfg) {
-		c.boltOptions = opts
-	}
-}
-
-// WithPath returns option to set system path to Metabase.
-func WithPath(path string) Option {
-	return func(c *cfg) {
-		c.info.Path = path
-	}
-}
-
-// WithPermissions returns option to specify permission bits
-// of Metabase system path.
-func WithPermissions(perm fs.FileMode) Option {
-	return func(c *cfg) {
-		c.info.Permission = perm
-	}
-}
-
-// WithMaxBatchSize returns option to specify the maximum number of concurrent
-// operations to be processed in a single transaction.
-// This option is missing from `bbolt.Options` but is set right after the DB is opened.
-func WithMaxBatchSize(s int) Option {
-	return func(c *cfg) {
-		if s != 0 {
-			c.boltBatchSize = s
-		}
-	}
-}
-
-// WithMaxBatchDelay returns option to specify the maximum time to wait before
-// the batch of concurrent transactions is processed.
-// This option is missing from `bbolt.Options` but is set right after the DB is opened.
-func WithMaxBatchDelay(d time.Duration) Option {
-	return func(c *cfg) {
-		if d != 0 {
-			c.boltBatchDelay = d
-		}
-	}
-}
-
-// WithEpochState returns option to specify a source of the current epoch height.
-func WithEpochState(s EpochState) Option {
-	return func(c *cfg) {
-		c.epochState = s
-	}
-}
-
-// WithMetrics returns option to specify metrics collector.
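-//
-// Options are applied once, at construction time. A typical setup is sketched
-// below (it mirrors what newDB does in the tests):
-//
-//	db := New(WithPath(path), WithEpochState(es), WithMetrics(m))
-//	err := db.Open(ctx, mode.ReadWrite) // followed by db.Init(ctx)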
-func WithMetrics(m Metrics) Option { - return func(c *cfg) { - c.metrics = m - } -} diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go deleted file mode 100644 index edaeb13c5..000000000 --- a/pkg/local_object_storage/metabase/db_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package meta_test - -import ( - "context" - "path/filepath" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -type epochState struct{ e uint64 } - -func (s epochState) CurrentEpoch() uint64 { - if s.e != 0 { - return s.e - } - - return 0 -} - -// saves "big" object in DB. -func putBig(db *meta.DB, obj *objectSDK.Object) error { - return metaPut(db, obj, nil) -} - -func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) { - res, err := metaSelect(db, cnr, fs, false) - require.NoError(t, err) - require.Len(t, res, len(exp)) - - for i := range exp { - require.Contains(t, res, exp[i]) - } -} - -func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) { - res, err := metaSelect(db, cnr, fs, useAttrIndex) - require.NoError(t, err) - require.Len(t, res, len(exp)) - - for i := range exp { - require.Contains(t, res, exp[i]) - } -} - -func newDB(t testing.TB, opts ...meta.Option) *meta.DB { - bdb := meta.New( - append([]meta.Option{ - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o600), - meta.WithEpochState(epochState{}), - }, opts...)..., - ) - - require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bdb.Init(context.Background())) - - return bdb -} - -func checkExpiredObjects(t *testing.T, db *meta.DB, f func(exp, nonExp *objectSDK.Object)) { - expObj := testutil.GenerateObject() - setExpiration(expObj, currEpoch-1) - - require.NoError(t, metaPut(db, expObj, nil)) - - nonExpObj := testutil.GenerateObject() - setExpiration(nonExpObj, currEpoch) - - require.NoError(t, metaPut(db, nonExpObj, nil)) - - f(expObj, nonExpObj) -} - -func setExpiration(o *objectSDK.Object, epoch uint64) { - var attr objectSDK.Attribute - - attr.SetKey(objectV2.SysAttributeExpEpoch) - attr.SetValue(strconv.FormatUint(epoch, 10)) - - o.SetAttributes(append(o.Attributes(), attr)...) 
-}
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
deleted file mode 100644
index 9a5a6e574..000000000
--- a/pkg/local_object_storage/metabase/delete.go
+++ /dev/null
@@ -1,597 +0,0 @@
-package meta
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"go.etcd.io/bbolt"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-)
-
-var errFailedToRemoveUniqueIndexes = errors.New("can't remove unique indexes")
-
-// DeletePrm groups the parameters of Delete operation.
-type DeletePrm struct {
-	addrs []oid.Address
-}
-
-// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct {
-	phyCount       uint64
-	logicCount     uint64
-	userCount      uint64
-	phySize        uint64
-	logicSize      uint64
-	removedByCnrID map[cid.ID]ObjectCounters
-}
-
-// LogicCount returns the number of removed logical objects.
-func (d DeleteRes) LogicCount() uint64 {
-	return d.logicCount
-}
-
-// UserCount returns the number of removed user objects.
-func (d DeleteRes) UserCount() uint64 {
-	return d.userCount
-}
-
-// RemovedByCnrID returns the number of removed objects by container ID.
-func (d DeleteRes) RemovedByCnrID() map[cid.ID]ObjectCounters {
-	return d.removedByCnrID
-}
-
-// PhyCount returns the number of removed physical objects.
-func (d DeleteRes) PhyCount() uint64 {
-	return d.phyCount
-}
-
-// PhySize returns the size of removed physical objects.
-func (d DeleteRes) PhySize() uint64 {
-	return d.phySize
-}
-
-// LogicSize returns the size of removed logical objects.
-func (d DeleteRes) LogicSize() uint64 {
-	return d.logicSize
-}
-
-// SetAddresses is a Delete option to set the addresses of the objects to delete.
-//
-// Option is required.
-func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
-	p.addrs = addrs
-}
-
-type referenceNumber struct {
-	all, cur int
-
-	obj *objectSDK.Object
-}
-
-type referenceCounter map[string]*referenceNumber
-
-// Delete removes object records from metabase indexes.
-func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
-	var (
-		startedAt = time.Now()
-		deleted   = false
-	)
-	defer func() {
-		db.metrics.AddMethodDuration("Delete", time.Since(startedAt), deleted)
-	}()
-
-	_, span := tracing.StartSpanFromContext(ctx, "metabase.Delete",
-		trace.WithAttributes(
-			attribute.Int("addr_count", len(prm.addrs)),
-		))
-	defer span.End()
-
-	db.modeMtx.RLock()
-	defer db.modeMtx.RUnlock()
-
-	if db.mode.NoMetabase() {
-		return DeleteRes{}, ErrDegradedMode
-	} else if db.mode.ReadOnly() {
-		return DeleteRes{}, ErrReadOnlyMode
-	}
-
-	var err error
-	var res DeleteRes
-
-	err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
-		res, err = db.deleteGroup(tx, prm.addrs)
-		return err
-	})
-	if err == nil {
-		deleted = true
-		for i := range prm.addrs {
-			storagelog.Write(ctx, db.log,
-				storagelog.AddressField(prm.addrs[i]),
-				storagelog.OpField("metabase DELETE"))
-		}
-	}
-	return res, metaerr.Wrap(err)
-}
-
-// deleteGroup deletes objects from the metabase. It also removes
-// parent references of split objects.
-func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) {
-	res := DeleteRes{
-		removedByCnrID: make(map[cid.ID]ObjectCounters),
-	}
-	refCounter := make(referenceCounter, len(addrs))
-	currEpoch := db.epochState.CurrentEpoch()
-
-	for i := range addrs {
-		r, err := db.delete(tx, addrs[i], refCounter, currEpoch)
-		if err != nil {
-			return DeleteRes{}, err
-		}
-
-		applyDeleteSingleResult(r, &res, addrs, i)
-	}
-
-	if err := db.updateCountersDelete(tx, res); err != nil {
-		return DeleteRes{}, err
-	}
-
-	for _, refNum := range refCounter {
-		if refNum.cur == refNum.all {
-			err := db.deleteObject(tx, refNum.obj, true)
-			if err != nil {
-				return DeleteRes{}, err
-			}
-		}
-	}
-
-	return res, nil
-}
-
-func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
-	if res.phyCount > 0 {
-		err := db.decShardObjectCounter(tx, phy, res.phyCount)
-		if err != nil {
-			return fmt.Errorf("decrease phy object counter: %w", err)
-		}
-	}
-
-	if res.logicCount > 0 {
-		err := db.decShardObjectCounter(tx, logical, res.logicCount)
-		if err != nil {
-			return fmt.Errorf("decrease logical object counter: %w", err)
-		}
-	}
-
-	if res.userCount > 0 {
-		err := db.decShardObjectCounter(tx, user, res.userCount)
-		if err != nil {
-			return fmt.Errorf("decrease user object counter: %w", err)
-		}
-	}
-
-	if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
-		return fmt.Errorf("decrease container object counter: %w", err)
-	}
-	return nil
-}
-
-func applyDeleteSingleResult(r deleteSingleResult, res *DeleteRes, addrs []oid.Address, i int) {
-	if r.Phy {
-		if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
-			v.Phy++
-			res.removedByCnrID[addrs[i].Container()] = v
-		} else {
-			res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
-				Phy: 1,
-			}
-		}
-
-		res.phyCount++
-		res.phySize += r.Size
-	}
-
-	if r.Logic {
-		if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
-			v.Logic++
-			res.removedByCnrID[addrs[i].Container()] = v
-		} else {
-			res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
-				Logic: 1,
-			}
-		}
-
-		res.logicCount++
-		res.logicSize += r.Size
-	}
-
-	if r.User {
-		if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
-			v.User++
-			res.removedByCnrID[addrs[i].Container()] = v
-		} else {
-			res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
-				User: 1,
-			}
-		}
-
-		res.userCount++
-	}
-}
-
-type deleteSingleResult struct {
-	Phy   bool
-	Logic bool
-	User  bool
-	Size  uint64
-}
-
-// delete removes object indexes from the metabase and counts the references
-// of the object being removed. The returned deleteSingleResult reports
-// whether the physical record was removed (removing a non-existent object is
-// error-free), whether the object was logically available before the removal
-// (for maintaining the logical object counter), whether it was a user
-// object, and the payload size of the removed object.
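-//
-// For example (illustrative): deleting a stored regular object that carries
-// no GC mark and is not in the graveyard yields
-// deleteSingleResult{Phy: true, Logic: true, User: true, Size: <payload size>}.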
-func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter, currEpoch uint64) (deleteSingleResult, error) {
-	key := make([]byte, addressKeySize)
-	addrKey := addressKey(addr, key)
-	garbageBKT := tx.Bucket(garbageBucketName)
-	graveyardBKT := tx.Bucket(graveyardBucketName)
-
-	removeAvailableObject := inGraveyardWithKey(addrKey, graveyardBKT, garbageBKT) == 0
-
-	// unmarshal object, work only with physically stored (raw == true) objects
-	obj, err := db.get(tx, addr, key, false, true, currEpoch)
-	if err != nil {
-		if client.IsErrObjectNotFound(err) {
-			addrKey = addressKey(addr, key)
-			if garbageBKT != nil {
-				err := garbageBKT.Delete(addrKey)
-				if err != nil {
-					return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
-				}
-			}
-			return deleteSingleResult{}, nil
-		}
-		var siErr *objectSDK.SplitInfoError
-		var ecErr *objectSDK.ECInfoError
-		if errors.As(err, &siErr) || errors.As(err, &ecErr) {
-			// if the object is virtual (parent), do nothing: it will be deleted with the last child;
-			// if the object is erasure-coded, it will be deleted with the last chunk present on the shard
-			return deleteSingleResult{}, nil
-		}
-
-		return deleteSingleResult{}, err
-	}
-
-	addrKey = addressKey(addr, key)
-	// remove record from the garbage bucket
-	if garbageBKT != nil {
-		err := garbageBKT.Delete(addrKey)
-		if err != nil {
-			return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
-		}
-	}
-
-	// if the object is the only link to its parent, then remove the parent
-	if parent := obj.Parent(); parent != nil {
-		parAddr := object.AddressOf(parent)
-		sParAddr := addressKey(parAddr, key)
-		k := string(sParAddr)
-
-		nRef, ok := refCounter[k]
-		if !ok {
-			nRef = &referenceNumber{
-				all: parentLength(tx, parAddr),
-				obj: parent,
-			}
-
-			refCounter[k] = nRef
-		}
-
-		nRef.cur++
-	}
-
-	isUserObject := IsUserObject(obj)
-
-	// remove object
-	err = db.deleteObject(tx, obj, false)
-	if err != nil {
-		return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
-	}
-
-	if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
-		return deleteSingleResult{}, err
-	}
-
-	return deleteSingleResult{
-		Phy:   true,
-		Logic: removeAvailableObject,
-		User:  isUserObject && removeAvailableObject,
-		Size:  obj.PayloadSize(),
-	}, nil
-}
-
-func (db *DB) deleteObject(
-	tx *bbolt.Tx,
-	obj *objectSDK.Object,
-	isParent bool,
-) error {
-	err := delUniqueIndexes(tx, obj, isParent)
-	if err != nil {
-		return errFailedToRemoveUniqueIndexes
-	}
-
-	err = updateListIndexes(tx, obj, delListIndexItem)
-	if err != nil {
-		return fmt.Errorf("remove list indexes: %w", err)
-	}
-
-	err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
-	if err != nil {
-		return fmt.Errorf("remove fake bucket tree indexes: %w", err)
-	}
-
-	if isParent {
-		// remove record from the garbage bucket, because regular object deletion does nothing for a virtual object
-		garbageBKT := tx.Bucket(garbageBucketName)
-		if garbageBKT != nil {
-			key := make([]byte, addressKeySize)
-			addrKey := addressKey(object.AddressOf(obj), key)
-			err := garbageBKT.Delete(addrKey)
-			if err != nil {
-				return fmt.Errorf("remove from garbage bucket: %w", err)
-			}
-		}
-	}
-
-	return nil
-}
-
-// parentLength returns the number of available children recorded in the parent ID index.
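-//
-// Illustrative: while both children of a virtual parent are still stored,
-// parentLength reports 2 for the parent address; deleteGroup removes the
-// parent record itself only after the reference counter has seen every
-// child deleted.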
-func parentLength(tx *bbolt.Tx, addr oid.Address) int { - bucketName := make([]byte, bucketKeySize) - - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) - if bkt == nil { - return 0 - } - - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) - if err != nil { - return 0 - } - - return len(lst) -} - -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt := tx.Bucket(item.name) - if bkt != nil { - return bkt.Delete(item.key) - } - return nil -} - -func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt := tx.Bucket(item.name) - if bkt == nil { - return nil - } - - lst, err := decodeList(bkt.Get(item.key)) - if err != nil || len(lst) == 0 { - return nil - } - - // remove element from the list - for i := range lst { - if bytes.Equal(item.val, lst[i]) { - copy(lst[i:], lst[i+1:]) - lst = lst[:len(lst)-1] - break - } - } - - // if list empty, remove the key from bucket - if len(lst) == 0 { - return bkt.Delete(item.key) - } - - // if list is not empty, then update it - encodedLst, err := encodeList(lst) - if err != nil { - return err - } - - return bkt.Put(item.key, encodedLst) -} - -func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt := tx.Bucket(item.name) - if bkt == nil { - return nil - } - - fkbtRoot := bkt.Bucket(item.key) - if fkbtRoot == nil { - return nil - } - - if err := fkbtRoot.Delete(item.val); err != nil { - return err - } - - if hasAnyItem(fkbtRoot) { - return nil - } - - if err := bkt.DeleteBucket(item.key); err != nil { - return err - } - - if hasAnyItem(bkt) { - return nil - } - - return tx.DeleteBucket(item.name) -} - -func hasAnyItem(b *bbolt.Bucket) bool { - var hasAnyItem bool - c := b.Cursor() - for k, _ := c.First(); k != nil; { - hasAnyItem = true - break - } - return hasAnyItem -} - -func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error { - addr := object.AddressOf(obj) - - objKey := objectKey(addr.Object(), make([]byte, objectKeySize)) - cnr := addr.Container() - bucketName := make([]byte, bucketKeySize) - - // add value to primary unique bucket - if !isParent { - switch obj.Type() { - case objectSDK.TypeRegular: - bucketName = primaryBucketName(cnr, bucketName) - case objectSDK.TypeTombstone: - bucketName = tombstoneBucketName(cnr, bucketName) - case objectSDK.TypeLock: - bucketName = bucketNameLockers(cnr, bucketName) - default: - return ErrUnknownObjectType - } - - if err := delUniqueIndexItem(tx, namedBucketItem{ - name: bucketName, - key: objKey, - }); err != nil { - return err - } - } else { - if err := delUniqueIndexItem(tx, namedBucketItem{ - name: parentBucketName(cnr, bucketName), - key: objKey, - }); err != nil { - return err - } - } - - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index - name: smallBucketName(cnr, bucketName), - key: objKey, - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index - name: rootBucketName(cnr, bucketName), - key: objKey, - }); err != nil { - return err - } - - if expEpoch, ok := hasExpirationEpoch(obj); ok { - if err := delUniqueIndexItem(tx, namedBucketItem{ - name: expEpochToObjectBucketName, - key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ - name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), - key: objKey, - }); err != nil { - return err - } - } - - return nil -} - -func deleteECRelatedInfo(tx 
*bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.Object, cnr cid.ID, refCounter referenceCounter) error { - ech := obj.ECHeader() - if ech == nil { - return nil - } - - hasAnyChunks := hasAnyECChunks(tx, ech, cnr) - // drop EC parent GC mark if current EC chunk is the last one - if !hasAnyChunks && garbageBKT != nil { - var ecParentAddress oid.Address - ecParentAddress.SetContainer(cnr) - ecParentAddress.SetObject(ech.Parent()) - addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize)) - err := garbageBKT.Delete(addrKey) - if err != nil { - return fmt.Errorf("remove EC parent from garbage bucket: %w", err) - } - } - - // also drop EC parent root info if current EC chunk is the last one - if !hasAnyChunks { - if err := delUniqueIndexItem(tx, namedBucketItem{ - name: rootBucketName(cnr, make([]byte, bucketKeySize)), - key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }); err != nil { - return err - } - } - - if ech.ParentSplitParentID() == nil { - return nil - } - - var splitParentAddress oid.Address - splitParentAddress.SetContainer(cnr) - splitParentAddress.SetObject(*ech.ParentSplitParentID()) - - if ref, ok := refCounter[string(addressKey(splitParentAddress, make([]byte, addressKeySize)))]; ok { - // linking object is already processing - // so just inform that one more reference was deleted - // split info and gc marks will be deleted after linking object delete - ref.cur++ - return nil - } - - if parentLength(tx, splitParentAddress) > 0 { - // linking object still exists, so leave split info and gc mark deletion for linking object processing - return nil - } - - // drop split parent gc mark - if garbageBKT != nil { - addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize)) - err := garbageBKT.Delete(addrKey) - if err != nil { - return fmt.Errorf("remove EC parent from garbage bucket: %w", err) - } - } - - // drop split info - return delUniqueIndexItem(tx, namedBucketItem{ - name: rootBucketName(cnr, make([]byte, bucketKeySize)), - key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), - }) -} - -func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { - data := getFromBucket(tx, ecInfoBucketName(cnr, make([]byte, bucketKeySize)), - objectKey(ech.Parent(), make([]byte, objectKeySize))) - return len(data) > 0 -} diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go deleted file mode 100644 index 884da23ff..000000000 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "fmt" - "path/filepath" - "slices" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -func TestDeleteECObject_WithoutSplit(t *testing.T) { - t.Parallel() - - db := New( - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochState{uint64(12)}), - 
) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - ecChunk := oidtest.ID() - ecParent := oidtest.ID() - tombstoneID := oidtest.ID() - - chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetID(ecChunk) - chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) - chunkObj.SetPayloadSize(uint64(10)) - chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0)) - - // put object with EC - - var prm PutPrm - prm.SetObject(chunkObj) - prm.SetStorageID([]byte("0/0")) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - - var ecChunkAddress oid.Address - ecChunkAddress.SetContainer(cnr) - ecChunkAddress.SetObject(ecChunk) - - var ecParentAddress oid.Address - ecParentAddress.SetContainer(cnr) - ecParentAddress.SetObject(ecParent) - - var getPrm GetPrm - - getPrm.SetAddress(ecChunkAddress) - _, err = db.Get(context.Background(), getPrm) - require.NoError(t, err) - - var ecInfoError *objectSDK.ECInfoError - getPrm.SetAddress(ecParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, &ecInfoError) - require.True(t, len(ecInfoError.ECInfo().Chunks) == 1 && - ecInfoError.ECInfo().Chunks[0].Index == 0 && - ecInfoError.ECInfo().Chunks[0].Total == 3) - - // inhume EC parent (like Delete does) - - var inhumePrm InhumePrm - var tombAddress oid.Address - tombAddress.SetContainer(cnr) - tombAddress.SetObject(tombstoneID) - inhumePrm.SetAddresses(ecParentAddress) - inhumePrm.SetTombstoneAddress(tombAddress) - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - getPrm.SetAddress(ecParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - - getPrm.SetAddress(ecChunkAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - - // GC finds and deletes split, EC parent and EC chunk - - var garbageAddresses []oid.Address - var itPrm GarbageIterationPrm - itPrm.SetHandler(func(g GarbageObject) error { - garbageAddresses = append(garbageAddresses, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 2, len(garbageAddresses)) - require.True(t, slices.Contains(garbageAddresses, ecParentAddress)) - require.True(t, slices.Contains(garbageAddresses, ecChunkAddress)) - - var deletePrm DeletePrm - deletePrm.SetAddresses(garbageAddresses...) 
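-	// Delete must remove the object records and clear the corresponding
-	// garbage-bucket marks, so the next garbage iteration finds nothing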
- _, err = db.Delete(context.Background(), deletePrm) - require.NoError(t, err) - - garbageAddresses = nil - itPrm.SetHandler(func(g GarbageObject) error { - garbageAddresses = append(garbageAddresses, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 0, len(garbageAddresses)) - - // after tombstone expired GC inhumes tombstone and drops graves - - var tombstonedObjects []TombstonedObject - var graveyardIterationPrm GraveyardIterationPrm - graveyardIterationPrm.SetHandler(func(object TombstonedObject) error { - tombstonedObjects = append(tombstonedObjects, object) - return nil - }) - require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) - require.Equal(t, 2, len(tombstonedObjects)) - - _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) - require.NoError(t, err) - - // GC finds tombstone as garbage and deletes it - - garbageAddresses = nil - itPrm.SetHandler(func(g GarbageObject) error { - garbageAddresses = append(garbageAddresses, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 1, len(garbageAddresses)) - require.Equal(t, tombstoneID, garbageAddresses[0].Object()) - - deletePrm.SetAddresses(garbageAddresses...) - _, err = db.Delete(context.Background(), deletePrm) - require.NoError(t, err) - - // no more objects should left as garbage - - itPrm.SetHandler(func(g GarbageObject) error { - require.FailNow(t, "no garbage objects should left") - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - - require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft)) - - require.NoError(t, testCountersAreZero(db, cnr)) -} - -func TestDeleteECObject_WithSplit(t *testing.T) { - t.Parallel() - for _, c := range []int{1, 2, 3} { - for _, l := range []bool{true, false} { - test := fmt.Sprintf("%d EC chunks with split info without linking object", c) - if l { - test = fmt.Sprintf("%d EC chunks with split info with linking object", c) - } - t.Run(test, func(t *testing.T) { - testDeleteECObjectWithSplit(t, c, l) - }) - } - } -} - -func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool) { - t.Parallel() - - db := New( - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochState{uint64(12)}), - ) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - ecChunks := make([]oid.ID, chunksCount) - for idx := range ecChunks { - ecChunks[idx] = oidtest.ID() - } - ecParentID := oidtest.ID() - splitParentID := oidtest.ID() - tombstoneID := oidtest.ID() - splitID := objectSDK.NewSplitID() - linkingID := oidtest.ID() - - ecChunkObjects := make([]*objectSDK.Object, chunksCount) - for idx := range ecChunkObjects { - ecChunkObjects[idx] = testutil.GenerateObjectWithCID(cnr) - ecChunkObjects[idx].SetContainerID(cnr) - ecChunkObjects[idx].SetID(ecChunks[idx]) - ecChunkObjects[idx].SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) - ecChunkObjects[idx].SetPayloadSize(uint64(10)) - ecChunkObjects[idx].SetECHeader(objectSDK.NewECHeader( - objectSDK.ECParentInfo{ - ID: ecParentID, - SplitParentID: &splitParentID, SplitID: splitID, - }, uint32(idx), uint32(chunksCount+1), []byte{}, 0)) - } - - splitParentObj := testutil.GenerateObjectWithCID(cnr) - 
splitParentObj.SetID(splitParentID) - - var linkingAddress oid.Address - linkingAddress.SetContainer(cnr) - linkingAddress.SetObject(linkingID) - - linkingObj := testutil.GenerateObjectWithCID(cnr) - linkingObj.SetID(linkingID) - linkingObj.SetParent(splitParentObj) - linkingObj.SetParentID(splitParentID) - linkingObj.SetChildren(ecParentID, oidtest.ID(), oidtest.ID()) - linkingObj.SetSplitID(splitID) - - // put object with EC and split info - - var prm PutPrm - prm.SetStorageID([]byte("0/0")) - for _, obj := range ecChunkObjects { - prm.SetObject(obj) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - } - - if withLinking { - prm.SetObject(linkingObj) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - } - - var ecParentAddress oid.Address - ecParentAddress.SetContainer(cnr) - ecParentAddress.SetObject(ecParentID) - - var getPrm GetPrm - var ecInfoError *objectSDK.ECInfoError - getPrm.SetAddress(ecParentAddress) - _, err := db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, &ecInfoError) - require.True(t, len(ecInfoError.ECInfo().Chunks) == chunksCount) - - var splitParentAddress oid.Address - splitParentAddress.SetContainer(cnr) - splitParentAddress.SetObject(splitParentID) - - var splitInfoError *objectSDK.SplitInfoError - getPrm.SetAddress(splitParentAddress) - getPrm.SetRaw(true) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, &splitInfoError) - require.True(t, splitInfoError.SplitInfo() != nil) - require.Equal(t, splitID, splitInfoError.SplitInfo().SplitID()) - lastPart, set := splitInfoError.SplitInfo().LastPart() - require.True(t, set) - require.Equal(t, lastPart, ecParentID) - if withLinking { - l, ok := splitInfoError.SplitInfo().Link() - require.True(t, ok) - require.Equal(t, linkingID, l) - } - getPrm.SetRaw(false) - - // inhume EC parent and split objects (like Delete does) - - inhumeAddresses := []oid.Address{splitParentAddress, ecParentAddress} - if withLinking { - inhumeAddresses = append(inhumeAddresses, linkingAddress) - } - - var inhumePrm InhumePrm - var tombAddress oid.Address - tombAddress.SetContainer(cnr) - tombAddress.SetObject(tombstoneID) - inhumePrm.SetAddresses(inhumeAddresses...) 
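-	// a single tombstone covers the split parent, the EC parent and,
-	// when present, the linking object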
- inhumePrm.SetTombstoneAddress(tombAddress) - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - getPrm.SetAddress(ecParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - - getPrm.SetAddress(splitParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - - if withLinking { - getPrm.SetAddress(linkingAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - } - - for _, id := range ecChunks { - var ecChunkAddress oid.Address - ecChunkAddress.SetContainer(cnr) - ecChunkAddress.SetObject(id) - getPrm.SetAddress(ecChunkAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - } - - // GC finds and deletes split, EC parent and EC chunks - - parentCount := 2 // split + ec - if withLinking { - parentCount = 3 - } - - var garbageAddresses []oid.Address - var itPrm GarbageIterationPrm - itPrm.SetHandler(func(g GarbageObject) error { - garbageAddresses = append(garbageAddresses, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, parentCount+chunksCount, len(garbageAddresses)) - require.True(t, slices.Contains(garbageAddresses, splitParentAddress)) - require.True(t, slices.Contains(garbageAddresses, ecParentAddress)) - if withLinking { - require.True(t, slices.Contains(garbageAddresses, linkingAddress)) - } - for _, id := range ecChunks { - var ecChunkAddress oid.Address - ecChunkAddress.SetContainer(cnr) - ecChunkAddress.SetObject(id) - require.True(t, slices.Contains(garbageAddresses, ecChunkAddress)) - } - - var deletePrm DeletePrm - deletePrm.SetAddresses(garbageAddresses...) - _, err = db.Delete(context.Background(), deletePrm) - require.NoError(t, err) - - var garbageStub []oid.Address - itPrm.SetHandler(func(g GarbageObject) error { - garbageStub = append(garbageStub, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 0, len(garbageStub)) - - // after tombstone expired GC inhumes tombstone and drops graves - - var tombstonedObjects []TombstonedObject - var graveyardIterationPrm GraveyardIterationPrm - graveyardIterationPrm.SetHandler(func(object TombstonedObject) error { - tombstonedObjects = append(tombstonedObjects, object) - return nil - }) - require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) - require.True(t, len(tombstonedObjects) == parentCount+chunksCount) - - _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) - require.NoError(t, err) - - // GC finds tombstone as garbage and deletes it - - garbageAddresses = nil - itPrm.SetHandler(func(g GarbageObject) error { - garbageAddresses = append(garbageAddresses, g.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 1, len(garbageAddresses)) - require.Equal(t, tombstoneID, garbageAddresses[0].Object()) - - deletePrm.SetAddresses(garbageAddresses...) 
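-	// at this point the only remaining garbage record is the expired
-	// tombstone itself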
- _, err = db.Delete(context.Background(), deletePrm) - require.NoError(t, err) - - // no more objects should left as garbage - - itPrm.SetHandler(func(g GarbageObject) error { - require.FailNow(t, "no garbage objects should left") - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - - require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft)) - - require.NoError(t, testCountersAreZero(db, cnr)) -} - -func testVerifyNoObjectDataLeft(tx *bbolt.Tx) error { - return tx.ForEach(func(name []byte, b *bbolt.Bucket) error { - if bytes.Equal(name, shardInfoBucket) || - bytes.Equal(name, containerCounterBucketName) || - bytes.Equal(name, containerVolumeBucketName) || - bytes.Equal(name, expEpochToObjectBucketName) { - return nil - } - return testBucketEmpty(name, b) - }) -} - -func testBucketEmpty(name []byte, b *bbolt.Bucket) error { - err := b.ForEach(func(k, v []byte) error { - if len(v) > 0 { - return fmt.Errorf("bucket %v is not empty", name) - } - return nil - }) - if err != nil { - return err - } - return b.ForEachBucket(func(k []byte) error { - return testBucketEmpty(k, b.Bucket(k)) - }) -} - -func testCountersAreZero(db *DB, cnr cid.ID) error { - c, err := db.ContainerCount(context.Background(), cnr) - if err != nil { - return err - } - if !c.IsZero() { - return fmt.Errorf("container %s has non zero counters", cnr.EncodeToString()) - } - s, err := db.ContainerSize(cnr) - if err != nil { - return err - } - if s != 0 { - return fmt.Errorf("container %s has non zero size", cnr.EncodeToString()) - } - return nil -} diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go deleted file mode 100644 index 0329e3a73..000000000 --- a/pkg/local_object_storage/metabase/delete_meta_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "path/filepath" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -func TestPutDeleteIndexAttributes(t *testing.T) { - db := New([]Option{ - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochState{}), - }...) 
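-	// attribute indexing is off by default; this test enables it per object
-	// via PutPrm.SetIndexAttributes(true) below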
- - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - obj1 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name") - testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object") - - var putPrm PutPrm - putPrm.SetObject(obj1) - - _, err := db.Put(context.Background(), putPrm) - require.NoError(t, err) - - require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) - require.Nil(t, b) - b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) - require.Nil(t, b) - return nil - })) - - obj2 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name") - testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object") - - putPrm.SetObject(obj2) - putPrm.SetIndexAttributes(true) - - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize)) - require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) - require.NotNil(t, b) - b = b.Bucket([]byte("CRDT-Name")) - require.NotNil(t, b) - require.True(t, bytes.Equal(zeroValue, b.Get(objKey))) - b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) - require.NotNil(t, b) - b = b.Bucket([]byte("/path/to/object")) - require.NotNil(t, b) - require.True(t, bytes.Equal(zeroValue, b.Get(objKey))) - return nil - })) - - var dPrm DeletePrm - dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2)) - _, err = db.Delete(context.Background(), dPrm) - require.NoError(t, err) - - require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) - require.Nil(t, b) - b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) - require.Nil(t, b) - return nil - })) -} diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go deleted file mode 100644 index c0762a377..000000000 --- a/pkg/local_object_storage/metabase/delete_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package meta_test - -import ( - "context" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDB_Delete(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "foo", "bar") - - child := 
testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - - // put object with parent - err := putBig(db, child) - require.NoError(t, err) - - // try to remove parent, should be no-op, error-free - err = metaDelete(db, object.AddressOf(parent)) - require.NoError(t, err) - - // inhume parent and child so they will be on graveyard - ts := testutil.GenerateObjectWithCID(cnr) - - err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object()) - require.NoError(t, err) - - ts = testutil.GenerateObjectWithCID(cnr) - - err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object()) - require.NoError(t, err) - - // delete object - err = metaDelete(db, object.AddressOf(child)) - require.NoError(t, err) - - // check if they marked as already removed - - ok, err := metaExists(db, object.AddressOf(child)) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - require.False(t, ok) - - ok, err = metaExists(db, object.AddressOf(parent)) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - require.False(t, ok) -} - -func TestDeleteAllChildren(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - // generate parent object - parent := testutil.GenerateObjectWithCID(cnr) - - // generate 2 children - child1 := testutil.GenerateObjectWithCID(cnr) - child1.SetParent(parent) - idParent, _ := parent.ID() - child1.SetParentID(idParent) - - child2 := testutil.GenerateObjectWithCID(cnr) - child2.SetParent(parent) - child2.SetParentID(idParent) - - // put children - require.NoError(t, putBig(db, child1)) - require.NoError(t, putBig(db, child2)) - - // Exists should return split info for parent - _, err := metaExists(db, object.AddressOf(parent)) - siErr := objectSDK.NewSplitInfoError(nil) - require.True(t, errors.As(err, &siErr)) - - // remove all children in single call - err = metaDelete(db, object.AddressOf(child1), object.AddressOf(child2)) - require.NoError(t, err) - - // parent should not be found now - ex, err := metaExists(db, object.AddressOf(parent)) - require.NoError(t, err) - require.False(t, ex) -} - -func TestGraveOnlyDelete(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - addr := oidtest.Address() - - // inhume non-existent object by address - require.NoError(t, metaInhume(db, addr, oidtest.ID())) - - // delete the object data - require.NoError(t, metaDelete(db, addr)) -} - -func TestExpiredObject(t *testing.T) { - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { - // removing expired object should be error-free - require.NoError(t, metaDelete(db, object.AddressOf(exp))) - - require.NoError(t, metaDelete(db, object.AddressOf(nonExp))) - }) -} - -func TestDelete(t *testing.T) { - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - for range 10 { - obj := testutil.GenerateObjectWithCID(cnr) - - var prm meta.PutPrm - prm.SetObject(obj) - prm.SetStorageID([]byte("0/0")) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(object.AddressOf(obj)) - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - } - - var addrs 
[]oid.Address - var iprm meta.GarbageIterationPrm - iprm.SetHandler(func(o meta.GarbageObject) error { - addrs = append(addrs, o.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), iprm)) - require.Equal(t, 10, len(addrs)) - var deletePrm meta.DeletePrm - deletePrm.SetAddresses(addrs...) - _, err := db.Delete(context.Background(), deletePrm) - require.NoError(t, err) - - addrs = nil - iprm.SetHandler(func(o meta.GarbageObject) error { - addrs = append(addrs, o.Address()) - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), iprm)) - require.Equal(t, 0, len(addrs)) -} - -func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) { - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - addr := oidtest.Address() - - var prm meta.InhumePrm - prm.SetAddresses(addr) - prm.SetGCMark() - _, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) - - var garbageCount int - var itPrm meta.GarbageIterationPrm - itPrm.SetHandler(func(g meta.GarbageObject) error { - garbageCount++ - return nil - }) - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 1, garbageCount) - - var delPrm meta.DeletePrm - delPrm.SetAddresses(addr) - _, err = db.Delete(context.Background(), delPrm) - require.NoError(t, err) - - garbageCount = 0 - require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm)) - require.Equal(t, 0, garbageCount) -} - -func metaDelete(db *meta.DB, addrs ...oid.Address) error { - var deletePrm meta.DeletePrm - deletePrm.SetAddresses(addrs...) - - _, err := db.Delete(context.Background(), deletePrm) - return err -} diff --git a/pkg/local_object_storage/metabase/errors.go b/pkg/local_object_storage/metabase/errors.go deleted file mode 100644 index e9ffab439..000000000 --- a/pkg/local_object_storage/metabase/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package meta - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" -) - -// ErrObjectIsExpired is returned when the requested object's -// epoch is less than the current one. Such objects are considered -// as removed and should not be returned from the Storage Engine. -var ErrObjectIsExpired = logicerr.New("object is expired") diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go deleted file mode 100644 index 7bd6f90a6..000000000 --- a/pkg/local_object_storage/metabase/exists.go +++ /dev/null @@ -1,239 +0,0 @@ -package meta - -import ( - "context" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// ExistsPrm groups the parameters of Exists operation. -type ExistsPrm struct { - addr oid.Address - ecParentAddr oid.Address -} - -// ExistsRes groups the resulting values of Exists operation. 
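-//
-// A typical check looks like this sketch (hypothetical caller code):
-//
-//	var prm ExistsPrm
-//	prm.SetAddress(addr)
-//	res, err := db.Exists(ctx, prm)
-//	if err == nil && res.Exists() { /* the object is stored here */ }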
-type ExistsRes struct {
-	exists bool
-	locked bool
-}
-
-var ErrLackSplitInfo = logicerr.New("no split info on parent object")
-
-// SetAddress is an Exists option to set the object checked for existence.
-func (p *ExistsPrm) SetAddress(addr oid.Address) {
-	p.addr = addr
-}
-
-// SetECParent is an Exists option to set the object's EC parent.
-func (p *ExistsPrm) SetECParent(addr oid.Address) {
-	p.ecParentAddr = addr
-}
-
-// Exists returns the fact that the object is in the metabase.
-func (p ExistsRes) Exists() bool {
-	return p.exists
-}
-
-// Locked returns the fact that the object is locked.
-func (p ExistsRes) Locked() bool {
-	return p.locked
-}
-
-// Exists returns ErrAlreadyRemoved if addr was marked as removed. Otherwise it
-// returns true if addr is in the primary index, or false if it is not.
-//
-// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been placed in the graveyard.
-// Returns the object.ErrObjectIsExpired if the object is present but already expired.
-func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) {
-	var (
-		startedAt = time.Now()
-		success   = false
-	)
-	defer func() {
-		db.metrics.AddMethodDuration("Exists", time.Since(startedAt), success)
-	}()
-
-	_, span := tracing.StartSpanFromContext(ctx, "metabase.Exists",
-		trace.WithAttributes(
-			attribute.String("address", prm.addr.EncodeToString()),
-		))
-	defer span.End()
-
-	db.modeMtx.RLock()
-	defer db.modeMtx.RUnlock()
-
-	if db.mode.NoMetabase() {
-		return res, ErrDegradedMode
-	}
-
-	currEpoch := db.epochState.CurrentEpoch()
-
-	err = db.boltDB.View(func(tx *bbolt.Tx) error {
-		res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
-
-		return err
-	})
-	success = err == nil
-	return res, metaerr.Wrap(err)
-}
-
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
-	var locked bool
-	if !ecParent.Equals(oid.Address{}) {
-		st, err := objectStatus(tx, ecParent, currEpoch)
-		if err != nil {
-			return false, false, err
-		}
-		switch st {
-		case 2:
-			return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
-		case 3:
-			return false, locked, ErrObjectIsExpired
-		}
-
-		locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
-	}
-	// check graveyard and object expiration first
-	st, err := objectStatus(tx, addr, currEpoch)
-	if err != nil {
-		return false, false, err
-	}
-	switch st {
-	case 1:
-		return false, locked, logicerr.Wrap(new(apistatus.ObjectNotFound))
-	case 2:
-		return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
-	case 3:
-		return false, locked, ErrObjectIsExpired
-	}
-
-	objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
-
-	cnr := addr.Container()
-	key := make([]byte, bucketKeySize)
-
-	// if graveyard is empty, then check if object exists in primary bucket
-	if inBucket(tx, primaryBucketName(cnr, key), objKey) {
-		return true, locked, nil
-	}
-
-	// if primary bucket is empty, then check if object exists in parent bucket
-	if inBucket(tx, parentBucketName(cnr, key), objKey) {
-		splitInfo, err := getSplitInfo(tx, cnr, objKey)
-		if err != nil {
-			return false, locked, err
-		}
-
-		return false, locked, logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
-	}
-	// if parent bucket is empty, then check if object exists in ec bucket
-	if data := getFromBucket(tx, ecInfoBucketName(cnr, key), objKey); len(data) != 0 {
-		return false, locked, getECInfoError(tx, cnr, data)
-	}
-
-	// if parent bucket is empty, then check if
object exists in typed buckets - return firstIrregularObjectType(tx, cnr, objKey) != objectSDK.TypeRegular, locked, nil -} - -// objectStatus returns: -// - 0 if object is available; -// - 1 if object with GC mark; -// - 2 if object is covered with tombstone; -// - 3 if object is expired. -func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { - return objectStatusWithCache(nil, tx, addr, currEpoch) -} - -func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { - // locked object could not be removed/marked with GC/expired - if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { - return 0, nil - } - - expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) - if err != nil { - return 0, err - } - - if expired { - return 3, nil - } - - graveyardBkt := getGraveyardBucket(bc, tx) - garbageBkt := getGarbageBucket(bc, tx) - addrKey := addressKey(addr, make([]byte, addressKeySize)) - return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil -} - -func inGraveyardWithKey(addrKey []byte, graveyard, garbageBCK *bbolt.Bucket) uint8 { - if graveyard == nil { - // incorrect metabase state, does not make - // sense to check garbage bucket - return 0 - } - - val := graveyard.Get(addrKey) - if val == nil { - if garbageBCK == nil { - // incorrect node state - return 0 - } - - val = garbageBCK.Get(addrKey) - if val != nil { - // object has been marked with GC - return 1 - } - - // neither in the graveyard - // nor was marked with GC mark - return 0 - } - - // object in the graveyard - return 2 -} - -// inBucket checks if key is present in bucket . -func inBucket(tx *bbolt.Tx, name, key []byte) bool { - bkt := tx.Bucket(name) - if bkt == nil { - return false - } - - // using `get` as `exists`: https://github.com/boltdb/bolt/issues/321 - val := bkt.Get(key) - - return len(val) != 0 -} - -// getSplitInfo returns SplitInfo structure from root index. Returns error -// if there is no `key` record in root index. 
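One hedged readability aside before getSplitInfo below: the raw status codes 0–3 documented at objectStatus above are easy to misread in the switch statements that consume them. A hypothetical named mapping (not present in the deleted file) would read:

```go
// Hypothetical named values for the objectStatus result; the file itself
// uses the raw numbers documented above.
const (
	statusAvailable  = 0 // object is available
	statusGCMarked   = 1 // object is marked with GC
	statusTombstoned = 2 // object is covered with a tombstone
	statusExpired    = 3 // object is expired
)
```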
-func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, error) { - bucketName := rootBucketName(cnr, make([]byte, bucketKeySize)) - rawSplitInfo := getFromBucket(tx, bucketName, key) - if len(rawSplitInfo) == 0 { - return nil, ErrLackSplitInfo - } - - splitInfo := objectSDK.NewSplitInfo() - - err := splitInfo.Unmarshal(rawSplitInfo) - if err != nil { - return nil, fmt.Errorf("unmarshal split info from root index: %w", err) - } - - return splitInfo, nil -} diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go deleted file mode 100644 index 3045e17f1..000000000 --- a/pkg/local_object_storage/metabase/exists_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package meta_test - -import ( - "context" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -const currEpoch = 1000 - -func TestDB_Exists(t *testing.T) { - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - t.Run("no object", func(t *testing.T) { - nonExist := testutil.GenerateObject() - exists, err := metaExists(db, object.AddressOf(nonExist)) - require.NoError(t, err) - require.False(t, exists) - }) - - t.Run("regular object", func(t *testing.T) { - regular := testutil.GenerateObject() - err := putBig(db, regular) - require.NoError(t, err) - - exists, err := metaExists(db, object.AddressOf(regular)) - require.NoError(t, err) - require.True(t, exists) - - t.Run("removed object", func(t *testing.T) { - err := metaInhume(db, object.AddressOf(regular), oidtest.ID()) - require.NoError(t, err) - - exists, err := metaExists(db, object.AddressOf(regular)) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - require.False(t, exists) - }) - }) - - t.Run("tombstone object", func(t *testing.T) { - ts := testutil.GenerateObject() - ts.SetType(objectSDK.TypeTombstone) - - err := putBig(db, ts) - require.NoError(t, err) - - exists, err := metaExists(db, object.AddressOf(ts)) - require.NoError(t, err) - require.True(t, exists) - }) - - t.Run("lock object", func(t *testing.T) { - lock := testutil.GenerateObject() - lock.SetType(objectSDK.TypeLock) - - err := putBig(db, lock) - require.NoError(t, err) - - exists, err := metaExists(db, object.AddressOf(lock)) - require.NoError(t, err) - require.True(t, exists) - }) - - t.Run("virtual object", func(t *testing.T) { - cnr := cidtest.ID() - parent := testutil.GenerateObjectWithCID(cnr) - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - - err := putBig(db, child) - require.NoError(t, err) - - _, err = metaExists(db, object.AddressOf(parent)) - - var expectedErr *objectSDK.SplitInfoError - require.True(t, errors.As(err, &expectedErr)) - }) - - t.Run("merge split info", func(t *testing.T) { - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "foo", "bar") - - child 
:= testutil.GenerateObjectWithCID(cnr)
-		child.SetParent(parent)
-		idParent, _ := parent.ID()
-		child.SetParentID(idParent)
-		child.SetSplitID(splitID)
-
-		link := testutil.GenerateObjectWithCID(cnr)
-		link.SetParent(parent)
-		link.SetParentID(idParent)
-		idChild, _ := child.ID()
-		link.SetChildren(idChild)
-		link.SetSplitID(splitID)
-
-		t.Run("direct order", func(t *testing.T) {
-			err := putBig(db, child)
-			require.NoError(t, err)
-
-			err = putBig(db, link)
-			require.NoError(t, err)
-
-			_, err = metaExists(db, object.AddressOf(parent))
-			require.Error(t, err)
-
-			var si *objectSDK.SplitInfoError
-			require.ErrorAs(t, err, &si)
-			require.Equal(t, splitID, si.SplitInfo().SplitID())
-
-			id1, _ := child.ID()
-			id2, _ := si.SplitInfo().LastPart()
-			require.Equal(t, id1, id2)
-
-			id1, _ = link.ID()
-			id2, _ = si.SplitInfo().Link()
-			require.Equal(t, id1, id2)
-		})
-
-		t.Run("reverse order", func(t *testing.T) {
-			err := metaPut(db, link, nil)
-			require.NoError(t, err)
-
-			err = putBig(db, child)
-			require.NoError(t, err)
-
-			_, err = metaExists(db, object.AddressOf(parent))
-			require.Error(t, err)
-
-			var si *objectSDK.SplitInfoError
-			require.ErrorAs(t, err, &si)
-			require.Equal(t, splitID, si.SplitInfo().SplitID())
-
-			id1, _ := child.ID()
-			id2, _ := si.SplitInfo().LastPart()
-			require.Equal(t, id1, id2)
-
-			id1, _ = link.ID()
-			id2, _ = si.SplitInfo().Link()
-			require.Equal(t, id1, id2)
-		})
-	})
-
-	t.Run("random object", func(t *testing.T) {
-		addr := oidtest.Address()
-
-		exists, err := metaExists(db, addr)
-		require.NoError(t, err)
-		require.False(t, exists)
-	})
-
-	t.Run("expired object", func(t *testing.T) {
-		checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
-			gotObj, err := metaExists(db, object.AddressOf(exp))
-			require.False(t, gotObj)
-			require.ErrorIs(t, err, meta.ErrObjectIsExpired)
-
-			gotObj, err = metaExists(db, object.AddressOf(nonExp))
-			require.NoError(t, err)
-			require.True(t, gotObj)
-		})
-	})
-}
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
deleted file mode 100644
index a1351cb6f..000000000
--- a/pkg/local_object_storage/metabase/expired.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package meta
-
-import (
-	"context"
-	"encoding/binary"
-	"errors"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"go.etcd.io/bbolt"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-)
-
-var errInvalidEpochValueLength = errors.New("could not parse expiration epoch: invalid data length")
-
-// FilterExpired returns the expired items among addresses.
-// An address is considered expired if the metabase contains its expiration
-// information and the expiration epoch is less than the given epoch.
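A minimal usage sketch for FilterExpired, not from the deleted file; `db`, `ctx`, `currEpoch` and `addrs` are assumed to be in scope:

```go
// Sketch: keep only the expired addresses out of a candidate list.
expired, err := db.FilterExpired(ctx, currEpoch, addrs)
if err != nil {
	return err // e.g. meta.ErrDegradedMode when the metabase is disabled
}
for _, addr := range expired {
	// each addr carries both the container and the object ID
	_ = addr
}
```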
-func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - var ( - startedAt = time.Now() - success = true - ) - defer func() { - db.metrics.AddMethodDuration("FilterExpired", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.FilterExpired", - trace.WithAttributes( - attribute.String("epoch", strconv.FormatUint(epoch, 10)), - attribute.Int("addr_count", len(addresses)), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - result := make([]oid.Address, 0, len(addresses)) - containerIDToObjectIDs := make(map[cid.ID][]oid.ID) - for _, addr := range addresses { - containerIDToObjectIDs[addr.Container()] = append(containerIDToObjectIDs[addr.Container()], addr.Object()) - } - - err := db.boltDB.View(func(tx *bbolt.Tx) error { - for containerID, objectIDs := range containerIDToObjectIDs { - select { - case <-ctx.Done(): - return ErrInterruptIterator - default: - } - - expired, err := selectExpiredObjects(tx, epoch, containerID, objectIDs) - if err != nil { - return err - } - result = append(result, expired...) - } - return nil - }) - if err != nil { - return nil, metaerr.Wrap(err) - } - success = true - return result, nil -} - -func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - return isExpiredWithCache(nil, tx, addr, currEpoch) -} - -func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - b := getExpiredBucket(bc, tx, addr.Container()) - if b == nil { - return false, nil - } - key := make([]byte, objectKeySize) - addr.Object().Encode(key) - val := b.Get(key) - if len(val) == 0 { - return false, nil - } - if len(val) != epochSize { - return false, errInvalidEpochValueLength - } - expEpoch := binary.LittleEndian.Uint64(val) - return expEpoch < currEpoch, nil -} - -func selectExpiredObjects(tx *bbolt.Tx, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.Address, error) { - result := make([]oid.Address, 0) - var addr oid.Address - addr.SetContainer(containerID) - for _, objID := range objectIDs { - addr.SetObject(objID) - expired, err := isExpired(tx, addr, epoch) - if err != nil { - return nil, err - } - if expired { - result = append(result, addr) - } - } - return result, nil -} diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go deleted file mode 100644 index 495c1eee7..000000000 --- a/pkg/local_object_storage/metabase/expired_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestDB_SelectExpired(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - containerID1 := cidtest.ID() - - expiredObj11 := testutil.GenerateObject() - expiredObj11.SetContainerID(containerID1) - setExpiration(expiredObj11, 10) - err := putBig(db, expiredObj11) - require.NoError(t, err) - - expiredObj12 := testutil.GenerateObject() - expiredObj12.SetContainerID(containerID1) - setExpiration(expiredObj12, 12) - err = putBig(db, expiredObj12) 
- require.NoError(t, err) - - notExpiredObj11 := testutil.GenerateObject() - notExpiredObj11.SetContainerID(containerID1) - setExpiration(notExpiredObj11, 20) - err = putBig(db, notExpiredObj11) - require.NoError(t, err) - - regularObj11 := testutil.GenerateObject() - regularObj11.SetContainerID(containerID1) - err = putBig(db, regularObj11) - require.NoError(t, err) - - containerID2 := cidtest.ID() - - expiredObj21 := testutil.GenerateObject() - expiredObj21.SetContainerID(containerID2) - setExpiration(expiredObj21, 10) - err = putBig(db, expiredObj21) - require.NoError(t, err) - - expiredObj22 := testutil.GenerateObject() - expiredObj22.SetContainerID(containerID2) - setExpiration(expiredObj22, 12) - err = putBig(db, expiredObj22) - require.NoError(t, err) - - notExpiredObj21 := testutil.GenerateObject() - notExpiredObj21.SetContainerID(containerID2) - setExpiration(notExpiredObj21, 20) - err = putBig(db, notExpiredObj21) - require.NoError(t, err) - - regularObj21 := testutil.GenerateObject() - regularObj21.SetContainerID(containerID2) - err = putBig(db, regularObj21) - require.NoError(t, err) - - expired, err := db.FilterExpired(context.Background(), 15, - []oid.Address{ - getAddressSafe(t, expiredObj11), getAddressSafe(t, expiredObj12), getAddressSafe(t, notExpiredObj11), getAddressSafe(t, regularObj11), - getAddressSafe(t, expiredObj21), getAddressSafe(t, expiredObj22), getAddressSafe(t, notExpiredObj21), getAddressSafe(t, regularObj21), - }) - require.NoError(t, err) - require.Equal(t, 4, len(expired), "invalid expired count") - require.Contains(t, expired, getAddressSafe(t, expiredObj11)) - require.Contains(t, expired, getAddressSafe(t, expiredObj12)) - require.Contains(t, expired, getAddressSafe(t, expiredObj21)) - require.Contains(t, expired, getAddressSafe(t, expiredObj22)) -} - -func getAddressSafe(t *testing.T, o *objectSDK.Object) oid.Address { - cid, set := o.ContainerID() - if !set { - t.Fatalf("container id required") - } - id, set := o.ID() - if !set { - t.Fatalf("object id required") - } - var addr oid.Address - addr.SetContainer(cid) - addr.SetObject(id) - return addr -} diff --git a/pkg/local_object_storage/metabase/generic_test.go b/pkg/local_object_storage/metabase/generic_test.go deleted file mode 100644 index 52581b2a0..000000000 --- a/pkg/local_object_storage/metabase/generic_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package meta - -import ( - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest" -) - -func TestGeneric(t *testing.T) { - t.Parallel() - - newMetabase := func(t *testing.T) storagetest.Component { - return New( - WithEpochState(epochStateImpl{}), - WithPath(filepath.Join(t.TempDir(), "metabase"))) - } - - storagetest.TestAll(t, newMetabase) -} diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go deleted file mode 100644 index 821810c09..000000000 --- a/pkg/local_object_storage/metabase/get.go +++ /dev/null @@ -1,238 +0,0 @@ -package meta - -import ( - "context" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// GetPrm groups the parameters of Get operation. -type GetPrm struct { - addr oid.Address - raw bool -} - -// GetRes groups the resulting values of Get operation. -type GetRes struct { - hdr *objectSDK.Object -} - -// SetAddress is a Get option to set the address of the requested object. -// -// Option is required. -func (p *GetPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetRaw is a Get option to set raw flag value. If flag is unset, then Get -// returns header of virtual object, otherwise it returns SplitInfo of virtual -// object. -func (p *GetPrm) SetRaw(raw bool) { - p.raw = raw -} - -// Header returns the requested object header. -func (r GetRes) Header() *objectSDK.Object { - return r.hdr -} - -// Get returns object header for specified address. -// -// Returns an error of type apistatus.ObjectNotFound if object is missing in DB. -// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard. -// Returns the object.ErrObjectIsExpired if the object is presented but already expired. -func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("Get", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.Get", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.Bool("raw", prm.raw), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } - - currEpoch := db.epochState.CurrentEpoch() - - err = db.boltDB.View(func(tx *bbolt.Tx) error { - key := make([]byte, addressKeySize) - res.hdr, err = db.get(tx, prm.addr, key, true, prm.raw, currEpoch) - - return err - }) - success = err == nil - return res, metaerr.Wrap(err) -} - -func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { - return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) -} - -func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { - if checkStatus { - st, err := objectStatusWithCache(bc, tx, addr, currEpoch) - if err != nil { - return nil, err - } - switch st { - case 1: - return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) - case 2: - return nil, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved)) - case 3: - return nil, ErrObjectIsExpired - } - } - - key = objectKey(addr.Object(), key) - cnr := addr.Container() - obj := objectSDK.New() - bucketName := make([]byte, bucketKeySize) - - // check in primary index - if b := getPrimaryBucket(bc, tx, cnr); b != nil { - if data := b.Get(key); len(data) != 0 { - return obj, obj.Unmarshal(data) - } - } - - data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) - if len(data) != 0 { - return nil, getECInfoError(tx, cnr, data) - } - - // if not found then check in tombstone index - data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) - } - - // if not found then check in locker index - data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) - } - - // if not 
found then check if object is a virtual - return getVirtualObject(tx, cnr, key, raw) -} - -func getFromBucket(tx *bbolt.Tx, name, key []byte) []byte { - bkt := tx.Bucket(name) - if bkt == nil { - return nil - } - - return bkt.Get(key) -} - -func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSDK.Object, error) { - if raw { - return nil, getSplitInfoError(tx, cnr, key) - } - - bucketName := make([]byte, bucketKeySize) - parentBucket := tx.Bucket(parentBucketName(cnr, bucketName)) - if parentBucket == nil { - return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - relativeLst, err := decodeList(parentBucket.Get(key)) - if err != nil { - return nil, err - } - - if len(relativeLst) == 0 { // this should never happen though - return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - var data []byte - for i := 0; i < len(relativeLst) && len(data) == 0; i++ { - virtualOID := relativeLst[len(relativeLst)-i-1] - data = getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID) - } - - if len(data) == 0 { - // check if any of the relatives is an EC object - for _, relative := range relativeLst { - data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), relative) - if len(data) > 0 { - // we can't return object headers, but can return error, - // so assembler can try to assemble complex object - return nil, getSplitInfoError(tx, cnr, key) - } - } - } - - child := objectSDK.New() - - err = child.Unmarshal(data) - if err != nil { - return nil, fmt.Errorf("unmarshal child with parent: %w", err) - } - - par := child.Parent() - - if par == nil { // this should never happen though - return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - return par, nil -} - -func getSplitInfoError(tx *bbolt.Tx, cnr cid.ID, key []byte) error { - splitInfo, err := getSplitInfo(tx, cnr, key) - if err == nil { - return logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo)) - } - - return logicerr.Wrap(new(apistatus.ObjectNotFound)) -} - -func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { - keys, err := decodeList(data) - if err != nil { - return err - } - ecInfo := objectSDK.NewECInfo() - for _, key := range keys { - // check in primary index - objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) - if len(objData) != 0 { - obj := objectSDK.New() - if err := obj.Unmarshal(objData); err != nil { - return err - } - chunk := objectSDK.ECChunk{} - id, _ := obj.ID() - chunk.SetID(id) - chunk.Index = obj.ECHeader().Index() - chunk.Total = obj.ECHeader().Total() - ecInfo.AddChunk(chunk) - } - } - return logicerr.Wrap(objectSDK.NewECInfoError(ecInfo)) -} diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go deleted file mode 100644 index 98c428410..000000000 --- a/pkg/local_object_storage/metabase/get_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package meta_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "runtime" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestDB_Get(t *testing.T) { - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - raw := testutil.GenerateObject() - - // equal fails on diff of attributes and <{}> attributes, - /* so we make non empty attribute slice in parent*/ - testutil.AddAttribute(raw, "foo", "bar") - - t.Run("object not found", func(t *testing.T) { - _, err := metaGet(db, object.AddressOf(raw), false) - require.Error(t, err) - }) - - t.Run("put regular object", func(t *testing.T) { - err := putBig(db, raw) - require.NoError(t, err) - - newObj, err := metaGet(db, object.AddressOf(raw), false) - require.NoError(t, err) - require.Equal(t, raw.CutPayload(), newObj) - }) - - t.Run("put tombstone object", func(t *testing.T) { - raw.SetType(objectSDK.TypeTombstone) - raw.SetID(oidtest.ID()) - - err := putBig(db, raw) - require.NoError(t, err) - - newObj, err := metaGet(db, object.AddressOf(raw), false) - require.NoError(t, err) - require.Equal(t, raw.CutPayload(), newObj) - }) - - t.Run("put lock object", func(t *testing.T) { - raw.SetType(objectSDK.TypeLock) - raw.SetID(oidtest.ID()) - - err := putBig(db, raw) - require.NoError(t, err) - - newObj, err := metaGet(db, object.AddressOf(raw), false) - require.NoError(t, err) - require.Equal(t, raw.CutPayload(), newObj) - }) - - t.Run("put virtual object", func(t *testing.T) { - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "foo", "bar") - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - child.SetSplitID(splitID) - - err := putBig(db, child) - require.NoError(t, err) - - t.Run("raw is true", func(t *testing.T) { - _, err = metaGet(db, object.AddressOf(parent), true) - require.Error(t, err) - - var siErr *objectSDK.SplitInfoError - require.ErrorAs(t, err, &siErr) - require.Equal(t, splitID, siErr.SplitInfo().SplitID()) - - id1, _ := child.ID() - id2, _ := siErr.SplitInfo().LastPart() - require.Equal(t, id1, id2) - - _, ok := siErr.SplitInfo().Link() - require.False(t, ok) - }) - - newParent, err := metaGet(db, object.AddressOf(parent), false) - require.NoError(t, err) - require.True(t, binaryEqual(parent.CutPayload(), newParent)) - - newChild, err := metaGet(db, object.AddressOf(child), true) - require.NoError(t, err) - require.True(t, binaryEqual(child.CutPayload(), newChild)) - }) - - t.Run("put erasure-coded object", func(t *testing.T) { - cnr := cidtest.ID() - virtual := testutil.GenerateObjectWithCID(cnr) - c, err := erasurecode.NewConstructor(3, 1) - require.NoError(t, err) - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - parts, err := c.Split(virtual, &pk.PrivateKey) - require.NoError(t, err) - for _, part := range parts { - err = putBig(db, part) - var eiError *objectSDK.ECInfoError - if err != nil && !errors.As(err, &eiError) { - require.NoError(t, err) - } - } - _, err = metaGet(db, object.AddressOf(virtual), true) - var eiError *objectSDK.ECInfoError - require.ErrorAs(t, err, &eiError) - require.Equal(t, len(eiError.ECInfo().Chunks), len(parts)) - for _, chunk := range eiError.ECInfo().Chunks { - var found bool - for _, part := range parts { - partID, _ := part.ID() - var 
chunkID oid.ID - require.NoError(t, chunkID.ReadFromV2(chunk.ID)) - if chunkID.Equals(partID) { - found = true - } - } - if !found { - require.Fail(t, "chunk not found") - } - } - }) - - t.Run("get removed object", func(t *testing.T) { - obj := oidtest.Address() - - require.NoError(t, metaInhume(db, obj, oidtest.ID())) - _, err := metaGet(db, obj, false) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - - obj = oidtest.Address() - - var prm meta.InhumePrm - prm.SetAddresses(obj) - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - _, err = metaGet(db, obj, false) - require.True(t, client.IsErrObjectNotFound(err)) - }) - - t.Run("expired object", func(t *testing.T) { - checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { - gotExp, err := metaGet(db, object.AddressOf(exp), false) - require.Nil(t, gotExp) - require.ErrorIs(t, err, meta.ErrObjectIsExpired) - - gotNonExp, err := metaGet(db, object.AddressOf(nonExp), false) - require.NoError(t, err) - require.True(t, binaryEqual(gotNonExp, nonExp.CutPayload())) - }) - }) -} - -// binary equal is used when object contains empty lists in the structure and -// requre.Equal fails on comparing and []{} lists. -func binaryEqual(a, b *objectSDK.Object) bool { - binaryA, err := a.Marshal() - if err != nil { - return false - } - - binaryB, err := b.Marshal() - if err != nil { - return false - } - - return bytes.Equal(binaryA, binaryB) -} - -func BenchmarkGet(b *testing.B) { - numOfObjects := [...]int{ - 1, - 10, - 100, - } - - defer func() { - _ = os.RemoveAll(b.Name()) - }() - - for _, num := range numOfObjects { - b.Run(fmt.Sprintf("%d_objects", num), func(b *testing.B) { - benchmarkGet(b, num) - }) - } -} - -func benchmarkGet(b *testing.B, numOfObj int) { - prepareDb := func(batchSize int) (*meta.DB, []oid.Address) { - db := newDB(b, - meta.WithMaxBatchSize(batchSize), - meta.WithMaxBatchDelay(10*time.Millisecond), - ) - addrs := make([]oid.Address, 0, numOfObj) - - for range numOfObj { - raw := testutil.GenerateObject() - addrs = append(addrs, object.AddressOf(raw)) - - err := putBig(db, raw) - require.NoError(b, err) - } - - return db, addrs - } - - db, addrs := prepareDb(runtime.NumCPU()) - defer func() { require.NoError(b, db.Close(context.Background())) }() - - b.Run("parallel", func(b *testing.B) { - b.ReportAllocs() - b.RunParallel(func(pb *testing.PB) { - var counter int - - for pb.Next() { - var getPrm meta.GetPrm - getPrm.SetAddress(addrs[counter%len(addrs)]) - counter++ - - _, err := db.Get(context.Background(), getPrm) - if err != nil { - b.Fatal(err) - } - } - }) - }) - - require.NoError(b, db.Close(context.Background())) - require.NoError(b, os.RemoveAll(b.Name())) - - db, addrs = prepareDb(1) - - b.Run("serial", func(b *testing.B) { - b.ReportAllocs() - for i := range b.N { - var getPrm meta.GetPrm - getPrm.SetAddress(addrs[i%len(addrs)]) - - _, err := db.Get(context.Background(), getPrm) - if err != nil { - b.Fatal(err) - } - } - }) -} - -func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error) { - var prm meta.GetPrm - prm.SetAddress(addr) - prm.SetRaw(raw) - - res, err := db.Get(context.Background(), prm) - return res.Header(), err -} diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go deleted file mode 100644 index 2f23d424c..000000000 --- a/pkg/local_object_storage/metabase/graveyard.go +++ /dev/null @@ -1,313 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "errors" - "fmt" - "time" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" -) - -// GarbageObject represents descriptor of the -// object that has been marked with GC. -type GarbageObject struct { - addr oid.Address -} - -// Address returns garbage object address. -func (g GarbageObject) Address() oid.Address { - return g.addr -} - -// GarbageHandler is a GarbageObject handling function. -type GarbageHandler func(GarbageObject) error - -// GarbageIterationPrm groups parameters of the garbage -// iteration process. -type GarbageIterationPrm struct { - h GarbageHandler - offset *oid.Address -} - -// SetHandler sets a handler that will be called on every -// GarbageObject. -func (g *GarbageIterationPrm) SetHandler(h GarbageHandler) { - g.h = h -} - -// SetOffset sets an offset of the iteration operation. -// The handler will be applied to the next after the -// specified offset if any are left. -// -// Note: if offset is not found in db, iteration starts -// from the element that WOULD BE the following after the -// offset if offset was presented. That means that it is -// safe to delete offset element and pass if to the -// iteration once again: iteration would start from the -// next element. -// -// Nil offset means start an integration from the beginning. -func (g *GarbageIterationPrm) SetOffset(offset oid.Address) { - g.offset = &offset -} - -// IterateOverGarbage iterates over all objects -// marked with GC mark. -// -// If h returns ErrInterruptIterator, nil returns immediately. -// Returns other errors of h directly. -func (db *DB) IterateOverGarbage(ctx context.Context, p GarbageIterationPrm) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateOverGarbage", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverGarbage") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - err := metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateDeletedObj(tx, gcHandler{p.h}, p.offset) - })) - success = err == nil - return err -} - -// TombstonedObject represents descriptor of the -// object that has been covered with tombstone. -type TombstonedObject struct { - addr oid.Address - tomb oid.Address -} - -// Address returns tombstoned object address. -func (g TombstonedObject) Address() oid.Address { - return g.addr -} - -// Tombstone returns address of a tombstone that -// covers object. -func (g TombstonedObject) Tombstone() oid.Address { - return g.tomb -} - -// TombstonedHandler is a TombstonedObject handling function. -type TombstonedHandler func(object TombstonedObject) error - -// GraveyardIterationPrm groups parameters of the graveyard -// iteration process. -type GraveyardIterationPrm struct { - h TombstonedHandler - offset *oid.Address -} - -// SetHandler sets a handler that will be called on every -// TombstonedObject. -func (g *GraveyardIterationPrm) SetHandler(h TombstonedHandler) { - g.h = h -} - -// SetOffset sets an offset of the iteration operation. -// The handler will be applied to the next after the -// specified offset if any are left. 
-// -// Note: if offset is not found in db, iteration starts -// from the element that WOULD BE the following after the -// offset if offset was presented. That means that it is -// safe to delete offset element and pass it to the -// iteration once again: iteration would start from the -// next element. -// -// Nil offset means start an integration from the beginning. -func (g *GraveyardIterationPrm) SetOffset(offset oid.Address) { - g.offset = &offset -} - -// IterateOverGraveyard iterates over all graves in DB. -// -// If h returns ErrInterruptIterator, nil returns immediately. -// Returns other errors of h directly. -func (db *DB) IterateOverGraveyard(ctx context.Context, p GraveyardIterationPrm) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateOverGraveyard", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverGraveyard") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - return metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateDeletedObj(tx, graveyardHandler{p.h}, p.offset) - })) -} - -type kvHandler interface { - handleKV(k, v []byte) error -} - -type gcHandler struct { - h GarbageHandler -} - -func (g gcHandler) handleKV(k, _ []byte) error { - o, err := garbageFromKV(k) - if err != nil { - return fmt.Errorf("parse garbage object: %w", err) - } - - return g.h(o) -} - -type graveyardHandler struct { - h TombstonedHandler -} - -func (g graveyardHandler) handleKV(k, v []byte) error { - o, err := graveFromKV(k, v) - if err != nil { - return fmt.Errorf("parse grave: %w", err) - } - - return g.h(o) -} - -func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address) error { - var bkt *bbolt.Bucket - switch t := h.(type) { - case graveyardHandler: - bkt = tx.Bucket(graveyardBucketName) - case gcHandler: - bkt = tx.Bucket(garbageBucketName) - default: - panic(fmt.Sprintf("metabase: unknown iteration object hadler: %T", t)) - } - - c := bkt.Cursor() - var k, v []byte - - if offset == nil { - k, v = c.First() - } else { - rawAddr := addressKey(*offset, make([]byte, addressKeySize)) - - k, v = c.Seek(rawAddr) - if bytes.Equal(k, rawAddr) { - // offset was found, move - // cursor to the next element - k, v = c.Next() - } - } - - for ; k != nil; k, v = c.Next() { - err := h.handleKV(k, v) - if err != nil { - if errors.Is(err, ErrInterruptIterator) { - return nil - } - - return err - } - } - - return nil -} - -func garbageFromKV(k []byte) (res GarbageObject, err error) { - err = decodeAddressFromKey(&res.addr, k) - if err != nil { - err = fmt.Errorf("parse address: %w", err) - } - - return -} - -func graveFromKV(k, v []byte) (res TombstonedObject, err error) { - if err = decodeAddressFromKey(&res.addr, k); err != nil { - err = fmt.Errorf("decode tombstone target from key: %w", err) - } else if err = decodeAddressFromKey(&res.tomb, v); err != nil { - err = fmt.Errorf("decode tombstone address from value: %w", err) - } - - return -} - -// InhumeTombstones deletes tombstoned objects from the -// graveyard bucket. -// -// Returns any error appeared during deletion process. 
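The offset semantics above make batched, resumable processing straightforward. A hedged sketch (not from the deleted file) combining graveyard iteration with InhumeTombstones; `db`, `ctx` and `batchSize` are assumed:

```go
// Sketch: collect one batch of graves, then drop them from the graveyard.
var batch []meta.TombstonedObject

var prm meta.GraveyardIterationPrm
prm.SetHandler(func(t meta.TombstonedObject) error {
	batch = append(batch, t)
	if len(batch) == batchSize { // batchSize is an assumed constant
		return meta.ErrInterruptIterator // stop early; resume later via SetOffset
	}
	return nil
})
if err := db.IterateOverGraveyard(ctx, prm); err != nil {
	return err
}
if len(batch) > 0 {
	// a safe resume point for the next pass, even after the records are deleted
	prm.SetOffset(batch[len(batch)-1].Address())

	if _, err := db.InhumeTombstones(ctx, batch); err != nil {
		return err
	}
}
```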
-func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return InhumeRes{}, ErrDegradedMode - } else if db.mode.ReadOnly() { - return InhumeRes{}, ErrReadOnlyMode - } - - buf := make([]byte, addressKeySize) - prm := InhumePrm{forceRemoval: true} - currEpoch := db.epochState.CurrentEpoch() - - var res InhumeRes - - err := db.boltDB.Batch(func(tx *bbolt.Tx) error { - res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)} - - garbageBKT := tx.Bucket(garbageBucketName) - graveyardBKT := tx.Bucket(graveyardBucketName) - - bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm) - if err != nil { - return err - } - - for i := range tss { - if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil { - return err - } - if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil { - return err - } - } - - return nil - }) - return res, err -} diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go deleted file mode 100644 index ebadecc04..000000000 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ /dev/null @@ -1,466 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - var counter int - var iterGravePRM meta.GraveyardIterationPrm - - iterGravePRM.SetHandler(func(garbage meta.TombstonedObject) error { - counter++ - return nil - }) - - err := db.IterateOverGraveyard(context.Background(), iterGravePRM) - require.NoError(t, err) - require.Zero(t, counter) - - var iterGCPRM meta.GarbageIterationPrm - iterGCPRM.SetHandler(func(garbage meta.GarbageObject) error { - counter++ - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGCPRM) - require.NoError(t, err) - require.Zero(t, counter) -} - -func TestDB_Iterate_OffsetNotFound(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - - var addr1 oid.Address - err := addr1.DecodeString("AUSF6rhReoAdPVKYUZWW9o2LbtTvekn54B3JXi7pdzmn/2daLhLB7yVXbjBaKkckkuvjX22BxRYuSHy9RPxuH9PZS") - require.NoError(t, err) - - var addr2 oid.Address - err = addr2.DecodeString("CwYYr6sFLU1zK6DeBTVd8SReADUoxYobUhSrxgXYxCVn/ANYbnJoQqdjmU5Dhk3LkxYj5E9nJHQFf8LjTEcap9TxM") - require.NoError(t, err) - 
- var addr3 oid.Address - err = addr3.DecodeString("6ay4GfhR9RgN28d5ufg63toPetkYHGcpcW7G3b7QWSek/ANYbnJoQqdjmU5Dhk3LkxYj5E9nJHQFf8LjTEcap9TxM") - require.NoError(t, err) - - obj1.SetContainerID(addr1.Container()) - obj1.SetID(addr1.Object()) - - obj2.SetContainerID(addr2.Container()) - obj2.SetID(addr2.Object()) - - err = putBig(db, obj1) - require.NoError(t, err) - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(object.AddressOf(obj1)) - inhumePrm.SetGCMark() - - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - var counter int - - var iterGCPRM meta.GarbageIterationPrm - iterGCPRM.SetOffset(object.AddressOf(obj2)) - iterGCPRM.SetHandler(func(garbage meta.GarbageObject) error { - require.Equal(t, garbage.Address(), addr1) - counter++ - - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGCPRM) - require.NoError(t, err) - - // the second object would be put after the - // first, so it is expected that iteration - // will not receive the first object - require.Equal(t, 0, counter) - - iterGCPRM.SetOffset(addr3) - iterGCPRM.SetHandler(func(garbage meta.GarbageObject) error { - require.Equal(t, garbage.Address(), addr1) - counter++ - - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGCPRM) - require.NoError(t, err) - - // the third object would be put before the - // first, so it is expected that iteration - // will receive the first object - require.Equal(t, 1, counter) -} - -func TestDB_IterateDeletedObjects(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - // generate and put 4 objects - obj1 := testutil.GenerateObjectWithCID(cnr) - obj2 := testutil.GenerateObjectWithCID(cnr) - obj3 := testutil.GenerateObjectWithCID(cnr) - obj4 := testutil.GenerateObjectWithCID(cnr) - - var err error - - err = putBig(db, obj1) - require.NoError(t, err) - - err = putBig(db, obj2) - require.NoError(t, err) - - err = putBig(db, obj3) - require.NoError(t, err) - - err = putBig(db, obj4) - require.NoError(t, err) - - var inhumePrm meta.InhumePrm - - // inhume with tombstone - addrTombstone := oidtest.Address() - addrTombstone.SetContainer(cnr) - - inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) - inhumePrm.SetTombstoneAddress(addrTombstone) - - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetGCMark() - - // inhume with GC mark - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - var ( - counterAll int - buriedTS, buriedGC []oid.Address - ) - - var iterGravePRM meta.GraveyardIterationPrm - iterGravePRM.SetHandler(func(tomstoned meta.TombstonedObject) error { - require.Equal(t, addrTombstone, tomstoned.Tombstone()) - - buriedTS = append(buriedTS, tomstoned.Address()) - counterAll++ - - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGravePRM) - require.NoError(t, err) - - var iterGCPRM meta.GarbageIterationPrm - iterGCPRM.SetHandler(func(garbage meta.GarbageObject) error { - buriedGC = append(buriedGC, garbage.Address()) - counterAll++ - - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGCPRM) - require.NoError(t, err) - - // objects covered with a tombstone - // also receive GS mark - garbageExpected := []oid.Address{ - object.AddressOf(obj1), object.AddressOf(obj2), - object.AddressOf(obj3), object.AddressOf(obj4), - } 
- - graveyardExpected := []oid.Address{ - object.AddressOf(obj1), object.AddressOf(obj2), - } - - require.Equal(t, len(garbageExpected)+len(graveyardExpected), counterAll) - require.ElementsMatch(t, graveyardExpected, buriedTS) - require.ElementsMatch(t, garbageExpected, buriedGC) -} - -func TestDB_IterateOverGraveyard_Offset(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - // generate and put 4 objects - obj1 := testutil.GenerateObjectWithCID(cnr) - obj2 := testutil.GenerateObjectWithCID(cnr) - obj3 := testutil.GenerateObjectWithCID(cnr) - obj4 := testutil.GenerateObjectWithCID(cnr) - - var err error - - err = putBig(db, obj1) - require.NoError(t, err) - - err = putBig(db, obj2) - require.NoError(t, err) - - err = putBig(db, obj3) - require.NoError(t, err) - - err = putBig(db, obj4) - require.NoError(t, err) - - // inhume with tombstone - addrTombstone := oidtest.Address() - addrTombstone.SetContainer(cnr) - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses( - object.AddressOf(obj1), object.AddressOf(obj2), - object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetTombstoneAddress(addrTombstone) - - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - expectedGraveyard := []oid.Address{ - object.AddressOf(obj1), object.AddressOf(obj2), - object.AddressOf(obj3), object.AddressOf(obj4), - } - - var ( - counter int - firstIterationSize = len(expectedGraveyard) / 2 - - gotGraveyard []oid.Address - ) - - var iterGraveyardPrm meta.GraveyardIterationPrm - iterGraveyardPrm.SetHandler(func(tombstoned meta.TombstonedObject) error { - require.Equal(t, addrTombstone, tombstoned.Tombstone()) - - gotGraveyard = append(gotGraveyard, tombstoned.Address()) - - counter++ - if counter == firstIterationSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm) - require.NoError(t, err) - require.Equal(t, firstIterationSize, counter) - require.Equal(t, firstIterationSize, len(gotGraveyard)) - - // last received address is an offset - offset := gotGraveyard[len(gotGraveyard)-1] - iterGraveyardPrm.SetOffset(offset) - iterGraveyardPrm.SetHandler(func(tombstoned meta.TombstonedObject) error { - require.Equal(t, addrTombstone, tombstoned.Tombstone()) - - gotGraveyard = append(gotGraveyard, tombstoned.Address()) - counter++ - - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm) - require.NoError(t, err) - require.Equal(t, len(expectedGraveyard), counter) - require.ElementsMatch(t, gotGraveyard, expectedGraveyard) - - // last received object (last in db) as offset - // should lead to no iteration at all - offset = gotGraveyard[len(gotGraveyard)-1] - iterGraveyardPrm.SetOffset(offset) - iWasCalled := false - iterGraveyardPrm.SetHandler(func(tombstoned meta.TombstonedObject) error { - iWasCalled = true - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGraveyardPrm) - require.NoError(t, err) - require.False(t, iWasCalled) -} - -func TestDB_IterateOverGarbage_Offset(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - // generate and put 4 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - obj3 := testutil.GenerateObject() - obj4 := testutil.GenerateObject() - - var err error - - err = putBig(db, obj1) - require.NoError(t, err) - - err = putBig(db, obj2) - 
require.NoError(t, err) - - err = putBig(db, obj3) - require.NoError(t, err) - - err = putBig(db, obj4) - require.NoError(t, err) - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses( - object.AddressOf(obj1), object.AddressOf(obj2), - object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetGCMark() - - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - expectedGarbage := []oid.Address{ - object.AddressOf(obj1), object.AddressOf(obj2), - object.AddressOf(obj3), object.AddressOf(obj4), - } - - var ( - counter int - firstIterationSize = len(expectedGarbage) / 2 - - gotGarbage []oid.Address - ) - - var iterGarbagePrm meta.GarbageIterationPrm - iterGarbagePrm.SetHandler(func(garbage meta.GarbageObject) error { - gotGarbage = append(gotGarbage, garbage.Address()) - - counter++ - if counter == firstIterationSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGarbagePrm) - require.NoError(t, err) - require.Equal(t, firstIterationSize, counter) - require.Equal(t, firstIterationSize, len(gotGarbage)) - - // last received address is an offset - offset := gotGarbage[len(gotGarbage)-1] - iterGarbagePrm.SetOffset(offset) - iterGarbagePrm.SetHandler(func(garbage meta.GarbageObject) error { - gotGarbage = append(gotGarbage, garbage.Address()) - counter++ - - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGarbagePrm) - require.NoError(t, err) - require.Equal(t, len(expectedGarbage), counter) - require.ElementsMatch(t, gotGarbage, expectedGarbage) - - // last received object (last in db) as offset - // should lead to no iteration at all - offset = gotGarbage[len(gotGarbage)-1] - iterGarbagePrm.SetOffset(offset) - iWasCalled := false - iterGarbagePrm.SetHandler(func(garbage meta.GarbageObject) error { - iWasCalled = true - return nil - }) - - err = db.IterateOverGarbage(context.Background(), iterGarbagePrm) - require.NoError(t, err) - require.False(t, iWasCalled) -} - -func TestDB_InhumeTombstones(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - // generate and put 2 objects - obj1 := testutil.GenerateObjectWithCID(cnr) - obj2 := testutil.GenerateObjectWithCID(cnr) - - var err error - - err = putBig(db, obj1) - require.NoError(t, err) - - err = putBig(db, obj2) - require.NoError(t, err) - - id1, _ := obj1.ID() - id2, _ := obj2.ID() - ts := objectSDK.NewTombstone() - ts.SetMembers([]oid.ID{id1, id2}) - objTs := objectSDK.New() - objTs.SetContainerID(cnr) - objTs.SetType(objectSDK.TypeTombstone) - - data, _ := ts.Marshal() - objTs.SetPayload(data) - require.NoError(t, objectSDK.CalculateAndSetID(objTs)) - require.NoError(t, putBig(db, objTs)) - - addrTombstone := object.AddressOf(objTs) - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) - inhumePrm.SetTombstoneAddress(addrTombstone) - - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - buriedTS := make([]meta.TombstonedObject, 0) - var iterGravePRM meta.GraveyardIterationPrm - var counter int - iterGravePRM.SetHandler(func(tomstoned meta.TombstonedObject) error { - buriedTS = append(buriedTS, tomstoned) - counter++ - - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGravePRM) - require.NoError(t, err) - require.Equal(t, 2, counter) - - res, err := db.InhumeTombstones(context.Background(), buriedTS) - require.NoError(t, err) - 
require.EqualValues(t, 1, res.LogicInhumed()) - require.EqualValues(t, 0, res.UserInhumed()) - require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID()) - - counter = 0 - iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error { - counter++ - return nil - }) - - err = db.IterateOverGraveyard(context.Background(), iterGravePRM) - require.NoError(t, err) - require.Zero(t, counter) -} diff --git a/pkg/local_object_storage/metabase/index_test.go b/pkg/local_object_storage/metabase/index_test.go deleted file mode 100644 index 45b9bc756..000000000 --- a/pkg/local_object_storage/metabase/index_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package meta - -import ( - "crypto/rand" - "math" - mrand "math/rand" - "testing" - "time" - - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/stretchr/testify/require" -) - -func Test_getVarUint(t *testing.T) { - data := make([]byte, 10) - for _, val := range []uint64{0, 0xfc, 0xfd, 0xfffe, 0xffff, 0xfffffffe, 0xffffffff, math.MaxUint64} { - expSize := io.PutVarUint(data, val) - actual, actSize, err := getVarUint(data) - require.NoError(t, err) - require.Equal(t, val, actual) - require.Equal(t, expSize, actSize, "value: %x", val) - - _, _, err = getVarUint(data[:expSize-1]) - require.Error(t, err) - } -} - -func Test_decodeList(t *testing.T) { - t.Run("empty", func(t *testing.T) { - lst, err := decodeList(nil) - require.NoError(t, err) - require.True(t, len(lst) == 0) - }) - t.Run("empty, 0 len", func(t *testing.T) { - lst, err := decodeList([]byte{0}) - require.NoError(t, err) - require.True(t, len(lst) == 0) - }) - t.Run("bad len", func(t *testing.T) { - _, err := decodeList([]byte{0xfe}) - require.Error(t, err) - }) - t.Run("random", func(t *testing.T) { - r := mrand.New(mrand.NewSource(time.Now().Unix())) - expected := make([][]byte, 20) - for i := range expected { - expected[i] = make([]byte, r.Uint32()%10) - rand.Read(expected[i]) - } - - data, err := encodeList(expected) - require.NoError(t, err) - - actual, err := decodeList(data) - require.NoError(t, err) - require.Equal(t, expected, actual) - - t.Run("unexpected EOF", func(t *testing.T) { - for i := 1; i < len(data)-1; i++ { - _, err := decodeList(data[:i]) - require.Error(t, err) - } - }) - }) -} diff --git a/pkg/local_object_storage/metabase/info.go b/pkg/local_object_storage/metabase/info.go deleted file mode 100644 index 4d6a06239..000000000 --- a/pkg/local_object_storage/metabase/info.go +++ /dev/null @@ -1,22 +0,0 @@ -package meta - -import ( - "io/fs" -) - -// Info groups the information about DB. -type Info struct { - // Full path to the metabase. - Path string - - // Permission of database file. - Permission fs.FileMode -} - -// DumpInfo returns information about the DB. 
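A short hedged usage note (`db` is assumed to be in scope): the returned Info is a plain value, so it can be logged directly.

```go
info := db.DumpInfo()
fmt.Printf("metabase: path=%s permission=%v\n", info.Path, info.Permission)
```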
-func (db *DB) DumpInfo() Info {
-	db.modeMtx.RLock()
-	defer db.modeMtx.RUnlock()
-
-	return db.info
-}
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
deleted file mode 100644
index 76018fb61..000000000
--- a/pkg/local_object_storage/metabase/inhume.go
+++ /dev/null
@@ -1,435 +0,0 @@
-package meta
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"go.etcd.io/bbolt"
-)
-
-// InhumePrm encapsulates parameters for Inhume operation.
-type InhumePrm struct {
-	tomb *oid.Address
-
-	target []oid.Address
-
-	lockObjectHandling bool
-
-	forceRemoval bool
-}
-
-// DeletionInfo contains details on a deleted object.
-type DeletionInfo struct {
-	Size   uint64
-	CID    cid.ID
-	IsUser bool
-}
-
-// InhumeRes encapsulates results of Inhume operation.
-type InhumeRes struct {
-	deletedLockObj  []oid.Address
-	logicInhumed    uint64
-	userInhumed     uint64
-	inhumedByCnrID  map[cid.ID]ObjectCounters
-	deletionDetails []DeletionInfo
-}
-
-// LogicInhumed returns the number of logical objects
-// that have been inhumed.
-func (i InhumeRes) LogicInhumed() uint64 {
-	return i.logicInhumed
-}
-
-func (i InhumeRes) UserInhumed() uint64 {
-	return i.userInhumed
-}
-
-// InhumedByCnrID returns the number of objects
-// that have been inhumed, grouped by container ID.
-func (i InhumeRes) InhumedByCnrID() map[cid.ID]ObjectCounters {
-	return i.inhumedByCnrID
-}
-
-// DeletedLockObjects returns deleted objects of LOCK
-// type. Always returns nil if SetLockObjectHandling
-// was not provided to the InhumePrm.
-func (i InhumeRes) DeletedLockObjects() []oid.Address {
-	return i.deletedLockObj
-}
-
-// GetDeletionInfoLength returns the number of stored elements
-// in the deletion details array.
-func (i InhumeRes) GetDeletionInfoLength() int {
-	return len(i.deletionDetails)
-}
-
-// GetDeletionInfoByIndex returns both the deleted object size and
-// the associated container ID by index.
-func (i InhumeRes) GetDeletionInfoByIndex(target int) DeletionInfo {
-	return i.deletionDetails[target]
-}
-
-// storeDeletionInfo stores the size of a deleted object and the associated
-// container ID in the corresponding arrays.
-func (i *InhumeRes) storeDeletionInfo(containerID cid.ID, deletedSize uint64, isUser bool) {
-	i.deletionDetails = append(i.deletionDetails, DeletionInfo{
-		Size:   deletedSize,
-		CID:    containerID,
-		IsUser: isUser,
-	})
-	i.logicInhumed++
-	if isUser {
-		i.userInhumed++
-	}
-
-	if v, ok := i.inhumedByCnrID[containerID]; ok {
-		v.Logic++
-		if isUser {
-			v.User++
-		}
-		i.inhumedByCnrID[containerID] = v
-	} else {
-		v = ObjectCounters{
-			Logic: 1,
-		}
-		if isUser {
-			v.User = 1
-		}
-		i.inhumedByCnrID[containerID] = v
-	}
-}
-
-// SetAddresses sets a list of object addresses that should be inhumed.
-func (p *InhumePrm) SetAddresses(addrs ...oid.Address) {
-	p.target = addrs
-}
-
-// SetTombstoneAddress sets tombstone address as the reason for inhume operation.
-//
-// addr should not be nil.
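-//
-// A minimal usage sketch (db, ctx, target and tombAddr assumed in scope):
-//
-//	var prm InhumePrm
-//	prm.SetAddresses(target)
-//	prm.SetTombstoneAddress(tombAddr)
-//	_, err := db.Inhume(ctx, prm)
-//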
-// Should not be called along with SetGCMark.
-func (p *InhumePrm) SetTombstoneAddress(addr oid.Address) {
-	p.tomb = &addr
-}
-
-// SetGCMark marks the object to be physically removed.
-//
-// Should not be called along with SetTombstoneAddress.
-func (p *InhumePrm) SetGCMark() {
-	p.tomb = nil
-}
-
-// SetLockObjectHandling enables the check for LOCK objects
-// among the targets set via SetAddresses.
-func (p *InhumePrm) SetLockObjectHandling() {
-	p.lockObjectHandling = true
-}
-
-// SetForceGCMark allows removal of any object. Expected to be
-// called only in control service.
-func (p *InhumePrm) SetForceGCMark() {
-	p.tomb = nil
-	p.forceRemoval = true
-}
-
-func (p *InhumePrm) validate() error {
-	if p == nil {
-		return nil
-	}
-	if p.tomb != nil {
-		for _, addr := range p.target {
-			if addr.Container() != p.tomb.Container() {
-				return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
-			}
-		}
-	}
-	return nil
-}
-
-var errBreakBucketForEach = errors.New("bucket ForEach break")
-
-// ErrLockObjectRemoval is returned when the inhume operation is performed
-// on a lock object and the removal is not forced.
-var ErrLockObjectRemoval = logicerr.New("lock object removal")
-
-// Inhume marks objects as removed but does not remove them from the metabase.
-//
-// Allows inhuming non-locked objects only. Returns apistatus.ObjectLocked
-// if at least one object is locked. Returns ErrLockObjectRemoval if inhuming
-// is performed on a lock (not locked) object.
-//
-// NOTE: Marks any object with a GC mark (despite any prohibitions on operations
-// with that object) if the SetForceGCMark option has been provided.
-func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
-	var (
-		startedAt = time.Now()
-		success   = false
-	)
-	defer func() {
-		db.metrics.AddMethodDuration("Inhume", time.Since(startedAt), success)
-	}()
-	_, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume")
-	defer span.End()
-
-	db.modeMtx.RLock()
-	defer db.modeMtx.RUnlock()
-
-	if err := prm.validate(); err != nil {
-		return InhumeRes{}, err
-	}
-
-	if db.mode.NoMetabase() {
-		return InhumeRes{}, ErrDegradedMode
-	} else if db.mode.ReadOnly() {
-		return InhumeRes{}, ErrReadOnlyMode
-	}
-
-	res := InhumeRes{
-		inhumedByCnrID: make(map[cid.ID]ObjectCounters),
-	}
-	currEpoch := db.epochState.CurrentEpoch()
-	err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
-		return db.inhumeTx(tx, currEpoch, prm, &res)
-	})
-	success = err == nil
-	if success {
-		for _, addr := range prm.target {
-			storagelog.Write(ctx, db.log,
-				storagelog.AddressField(addr),
-				storagelog.OpField("metabase INHUME"))
-		}
-	}
-	return res, metaerr.Wrap(err)
-}
-
-func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes) error {
-	garbageBKT := tx.Bucket(garbageBucketName)
-	graveyardBKT := tx.Bucket(graveyardBucketName)
-
-	bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
-	if err != nil {
-		return err
-	}
-
-	buf := make([]byte, addressKeySize)
-	for i := range prm.target {
-		if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
-			return err
-		}
-	}
-
-	return db.applyInhumeResToCounters(tx, res)
-}
-
-func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
-	id := addr.Object()
-	cnr := addr.Container()
-	tx := bkt.Tx()
-
-	// prevent locked objects
to be inhumed - if !prm.forceRemoval && objectLocked(tx, cnr, id) { - return new(apistatus.ObjectLocked) - } - - var lockWasChecked bool - - // prevent lock objects to be inhumed - // if `Inhume` was called not with the - // `WithForceGCMark` option - if !prm.forceRemoval { - if isLockObject(tx, cnr, id) { - return ErrLockObjectRemoval - } - - lockWasChecked = true - } - - obj, err := db.get(tx, addr, buf, false, true, epoch) - targetKey := addressKey(addr, buf) - var ecErr *objectSDK.ECInfoError - if err == nil { - err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res) - if err != nil { - return err - } - } else if errors.As(err, &ecErr) { - err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value) - if err != nil { - return err - } - } - - if prm.tomb != nil { - var isTomb bool - isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey) - if err != nil { - return err - } - - if isTomb { - return nil - } - } - - // consider checking if target is already in graveyard? - err = bkt.Put(targetKey, value) - if err != nil { - return err - } - - if prm.lockObjectHandling { - // do not perform lock check if - // it was already called - if lockWasChecked { - // inhumed object is not of - // the LOCK type - return nil - } - - if isLockObject(tx, cnr, id) { - res.deletedLockObj = append(res.deletedLockObj, addr) - } - } - return nil -} - -func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes, - garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket, - ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte, -) error { - for _, chunk := range ecInfo.Chunks { - chunkBuf := make([]byte, addressKeySize) - var chunkAddr oid.Address - chunkAddr.SetContainer(cnr) - var chunkID oid.ID - err := chunkID.ReadFromV2(chunk.ID) - if err != nil { - return err - } - chunkAddr.SetObject(chunkID) - chunkObj, err := db.get(tx, chunkAddr, chunkBuf, false, true, epoch) - if err != nil { - return err - } - chunkKey := addressKey(chunkAddr, chunkBuf) - err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, chunkKey, cnr, chunkObj, res) - if err != nil { - return err - } - if tomb != nil { - _, err = db.markAsGC(graveyardBKT, garbageBKT, chunkKey) - if err != nil { - return err - } - } - err = targetBucket.Put(chunkKey, value) - if err != nil { - return err - } - } - return nil -} - -func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { - if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil { - return err - } - if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil { - return err - } - - return db.updateContainerCounter(tx, res.inhumedByCnrID, false) -} - -// getInhumeTargetBucketAndValue return target bucket to store inhume result and value that will be put in the bucket. -// -// target bucket of the operation, one of the: -// 1. Graveyard if Inhume was called with a Tombstone -// 2. Garbage if Inhume was called with a GC mark -// -// value that will be put in the bucket, one of the: -// 1. tombstone address if Inhume was called with -// a Tombstone -// 2. 
zeroValue if Inhume was called with a GC mark -func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) { - if prm.tomb != nil { - targetBucket = graveyardBKT - tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize)) - - // it is forbidden to have a tomb-on-tomb in FrostFS, - // so graveyard keys must not be addresses of tombstones - data := targetBucket.Get(tombKey) - if data != nil { - err := targetBucket.Delete(tombKey) - if err != nil { - return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err) - } - } - - value = tombKey - } else { - targetBucket = garbageBKT - value = zeroValue - } - return targetBucket, value, nil -} - -func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) { - targetIsTomb := isTomb(graveyardBKT, addressKey) - - // do not add grave if target is a tombstone - if targetIsTomb { - return true, nil - } - - // if tombstone appears object must be - // additionally marked with GC - return false, garbageBKT.Put(addressKey, zeroValue) -} - -func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error { - containerID, _ := obj.ContainerID() - if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 { - res.storeDeletionInfo(containerID, obj.PayloadSize(), IsUserObject(obj)) - } - - // if object is stored, and it is regular object then update bucket - // with container size estimations - if obj.Type() == objectSDK.TypeRegular { - err := changeContainerSize(tx, cnr, obj.PayloadSize(), false) - if err != nil { - return err - } - } - return nil -} - -func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool { - targetIsTomb := false - - // iterate over graveyard and check if target address - // is the address of tombstone in graveyard. - // tombstone must have the same container ID as key. 
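-	// The scan below is therefore bounded by the container prefix of
-	// addressKey, comparing each graveyard value (a tombstone address) to it.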
- c := graveyardBucket.Cursor() - containerPrefix := addressKey[:cidSize] - for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() { - // check if graveyard has record with key corresponding - // to tombstone address (at least one) - targetIsTomb = bytes.Equal(v, addressKey) - if targetIsTomb { - break - } - } - return targetIsTomb -} diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go deleted file mode 100644 index 180713287..000000000 --- a/pkg/local_object_storage/metabase/inhume_ec_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package meta - -import ( - "context" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestInhumeECObject(t *testing.T) { - t.Parallel() - - db := New( - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochState{uint64(12)}), - ) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - ecChunk := oidtest.ID() - ecChunk2 := oidtest.ID() - ecParent := oidtest.ID() - tombstoneID := oidtest.ID() - - chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetID(ecChunk) - chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) - chunkObj.SetPayloadSize(uint64(5)) - chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0)) - - chunkObj2 := testutil.GenerateObjectWithCID(cnr) - chunkObj2.SetID(ecChunk2) - chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) - chunkObj2.SetPayloadSize(uint64(10)) - chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 1, 3, []byte{}, 0)) - - // put object with EC - - var prm PutPrm - prm.SetObject(chunkObj) - prm.SetStorageID([]byte("0/0")) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - - prm.SetObject(chunkObj2) - _, err = db.Put(context.Background(), prm) - require.NoError(t, err) - - var ecChunkAddress oid.Address - ecChunkAddress.SetContainer(cnr) - ecChunkAddress.SetObject(ecChunk) - - var ecParentAddress oid.Address - ecParentAddress.SetContainer(cnr) - ecParentAddress.SetObject(ecParent) - - var chunkObjectAddress oid.Address - chunkObjectAddress.SetContainer(cnr) - chunkObjectAddress.SetObject(ecChunk) - - var getPrm GetPrm - - getPrm.SetAddress(ecChunkAddress) - _, err = db.Get(context.Background(), getPrm) - require.NoError(t, err) - - var ecInfoError *objectSDK.ECInfoError - getPrm.SetAddress(ecParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, &ecInfoError) - require.True(t, len(ecInfoError.ECInfo().Chunks) == 2 && - ecInfoError.ECInfo().Chunks[0].Index == 0 && - ecInfoError.ECInfo().Chunks[0].Total == 3) - - // inhume Chunk - var inhumePrm InhumePrm - var tombAddress oid.Address - inhumePrm.SetAddresses(chunkObjectAddress) - res, err := 
db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - require.True(t, len(res.deletionDetails) == 1) - require.True(t, res.deletionDetails[0].Size == 5) - - // inhume EC parent (like Delete does) - tombAddress.SetContainer(cnr) - tombAddress.SetObject(tombstoneID) - inhumePrm.SetAddresses(ecParentAddress) - inhumePrm.SetTombstoneAddress(tombAddress) - res, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - // Previously deleted chunk shouldn't be in the details, because it is marked as garbage - require.True(t, len(res.deletionDetails) == 1) - require.True(t, res.deletionDetails[0].Size == 10) - - getPrm.SetAddress(ecParentAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) - - getPrm.SetAddress(ecChunkAddress) - _, err = db.Get(context.Background(), getPrm) - require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved)) -} diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go deleted file mode 100644 index 786d10396..000000000 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDB_Inhume(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - raw := testutil.GenerateObject() - testutil.AddAttribute(raw, "foo", "bar") - - err := putBig(db, raw) - require.NoError(t, err) - - err = metaInhume(db, object.AddressOf(raw), oidtest.ID()) - require.NoError(t, err) - - _, err = metaExists(db, object.AddressOf(raw)) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - - _, err = metaGet(db, object.AddressOf(raw), false) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) -} - -func TestInhumeTombOnTomb(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - var ( - err error - - cnr = cidtest.ID() - addr1 = oidtest.Address() - addr2 = oidtest.Address() - addr3 = oidtest.Address() - addr4 = oidtest.Address() - inhumePrm meta.InhumePrm - existsPrm meta.ExistsPrm - ) - - addr1.SetContainer(cnr) - addr2.SetContainer(cnr) - addr3.SetContainer(cnr) - addr4.SetContainer(cnr) - - inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstoneAddress(addr2) - - // inhume addr1 via addr2 - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - existsPrm.SetAddress(addr1) - - // addr1 should become inhumed {addr1:addr2} - _, err = db.Exists(context.Background(), existsPrm) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - - inhumePrm.SetAddresses(addr3) - inhumePrm.SetTombstoneAddress(addr1) - - // try to inhume addr3 via addr1 - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - // record with {addr1:addr2} should be removed from graveyard - // as a 
tomb-on-tomb; metabase should return ObjectNotFound - // NOT ObjectAlreadyRemoved since that record has been removed - // from graveyard but addr1 is still marked with GC - _, err = db.Exists(context.Background(), existsPrm) - require.True(t, client.IsErrObjectNotFound(err)) - - existsPrm.SetAddress(addr3) - - // addr3 should be inhumed {addr3: addr1} - _, err = db.Exists(context.Background(), existsPrm) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - - inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstoneAddress(addr4) - - // try to inhume addr1 (which is already a tombstone in graveyard) - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - existsPrm.SetAddress(addr1) - - // record with addr1 key should not appear in graveyard - // (tomb can not be inhumed) but should be kept as object - // with GC mark - _, err = db.Exists(context.Background(), existsPrm) - require.True(t, client.IsErrObjectNotFound(err)) -} - -func TestInhumeLocked(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - locked := oidtest.Address() - - err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()}) - require.NoError(t, err) - - var prm meta.InhumePrm - prm.SetAddresses(locked) - - _, err = db.Inhume(context.Background(), prm) - - var e *apistatus.ObjectLocked - require.ErrorAs(t, err, &e) -} - -func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error { - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(target) - var tombAddr oid.Address - tombAddr.SetContainer(target.Container()) - tombAddr.SetObject(tomb) - inhumePrm.SetTombstoneAddress(tombAddr) - - _, err := db.Inhume(context.Background(), inhumePrm) - return err -} diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go deleted file mode 100644 index 9cccd7dad..000000000 --- a/pkg/local_object_storage/metabase/iterators.go +++ /dev/null @@ -1,139 +0,0 @@ -package meta - -import ( - "context" - "errors" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// ExpiredObject is a descriptor of expired object from DB. -type ExpiredObject struct { - typ objectSDK.Type - - addr oid.Address -} - -// Type returns type of the expired object. -func (e *ExpiredObject) Type() objectSDK.Type { - return e.typ -} - -// Address returns address of the expired object. -func (e *ExpiredObject) Address() oid.Address { - return e.addr -} - -// ExpiredObjectHandler is an ExpiredObject handling function. -type ExpiredObjectHandler func(*ExpiredObject) error - -// ErrInterruptIterator is returned by iteration handlers -// as a "break" keyword. -var ErrInterruptIterator = logicerr.New("iterator is interrupted") - -// IterateExpired iterates over all objects in DB which are out of date -// relative to epoch. Locked objects are not included (do not confuse -// with objects of type LOCK). -// -// If h returns ErrInterruptIterator, nil returns immediately. 
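-// For example, a handler that stops after collecting n addresses (sketch,
-// addrs and n assumed in scope):
-//
-//	h := func(e *ExpiredObject) error {
-//		addrs = append(addrs, e.Address())
-//		if len(addrs) == n {
-//			return ErrInterruptIterator // stop early, no error reported
-//		}
-//		return nil
-//	}
-//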
-// Returns other errors of h directly. -func (db *DB) IterateExpired(ctx context.Context, epoch uint64, h ExpiredObjectHandler) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateExpired", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateExpired", - trace.WithAttributes( - attribute.String("epoch", strconv.FormatUint(epoch, 10)), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - err := metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateExpired(tx, epoch, h) - })) - success = err == nil - return err -} - -func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) error { - b := tx.Bucket(expEpochToObjectBucketName) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - expiresAfter, cnr, obj, err := parseExpirationEpochKey(k) - if err != nil { - return err - } - // bucket keys ordered by epoch, no need to continue lookup - if expiresAfter >= epoch { - return nil - } - if objectLocked(tx, cnr, obj) { - continue - } - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(obj) - objKey := objectKey(addr.Object(), make([]byte, objectKeySize)) - err = h(&ExpiredObject{ - typ: firstIrregularObjectType(tx, cnr, objKey), - addr: addr, - }) - if err == nil { - continue - } - if errors.Is(err, ErrInterruptIterator) { - return nil - } - return err - } - return nil -} - -func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error { - var cid cid.ID - var oid oid.ID - obj := objectSDK.New() - - return tx.ForEach(func(name []byte, b *bbolt.Bucket) error { - b58CID, postfix := parseContainerIDWithPrefix(&cid, name) - if len(b58CID) == 0 { - return nil - } - - switch postfix { - case primaryPrefix, - lockersPrefix, - tombstonePrefix: - default: - return nil - } - - return b.ForEach(func(k, v []byte) error { - if oid.Decode(k) == nil && obj.Unmarshal(v) == nil { - return f(cid, oid, obj) - } - - return nil - }) - }) -} diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go deleted file mode 100644 index 4c9579965..000000000 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package meta_test - -import ( - "context" - "strconv" - "testing" - - object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestDB_IterateExpired(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const epoch = 13 - - mAlive := map[objectSDK.Type]oid.Address{} - mExpired := map[objectSDK.Type]oid.Address{} - - for _, typ := range []objectSDK.Type{ - objectSDK.TypeRegular, - objectSDK.TypeTombstone, - objectSDK.TypeLock, - } { - mAlive[typ] = putWithExpiration(t, db, typ, epoch) - mExpired[typ] = putWithExpiration(t, db, typ, epoch-1) - } - - expiredLocked := 
putWithExpiration(t, db, objectSDK.TypeRegular, epoch-1) - - require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()})) - - err := db.IterateExpired(context.Background(), epoch, func(exp *meta.ExpiredObject) error { - if addr, ok := mAlive[exp.Type()]; ok { - require.NotEqual(t, addr, exp.Address()) - } - - require.NotEqual(t, expiredLocked, exp.Address()) - - addr, ok := mExpired[exp.Type()] - require.True(t, ok) - require.Equal(t, addr, exp.Address()) - - delete(mExpired, exp.Type()) - - return nil - }) - require.NoError(t, err) - - require.Empty(t, mExpired) -} - -func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt uint64) oid.Address { - obj := testutil.GenerateObject() - obj.SetType(typ) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(expiresAt, 10)) - - require.NoError(t, putBig(db, obj)) - - return object2.AddressOf(obj) -} diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go deleted file mode 100644 index 2a0bd7f6a..000000000 --- a/pkg/local_object_storage/metabase/list.go +++ /dev/null @@ -1,502 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "time" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// ErrEndOfListing is returned from object listing with cursor -// when storage can't return any more objects after provided -// cursor. Use nil cursor object to start listing again. -var ErrEndOfListing = logicerr.New("end of object listing") - -// Cursor is a type for continuous object listing. -type Cursor struct { - bucketName []byte - inBucketOffset []byte -} - -// ListPrm contains parameters for ListWithCursor operation. -type ListPrm struct { - count int - cursor *Cursor -} - -// SetCount sets maximum amount of addresses that ListWithCursor should return. -func (l *ListPrm) SetCount(count uint32) { - l.count = int(count) -} - -// SetCursor sets cursor for ListWithCursor operation. For initial request -// ignore this param or use nil value. For consecutive requests, use value -// from ListRes. -func (l *ListPrm) SetCursor(cursor *Cursor) { - l.cursor = cursor -} - -// ListRes contains values returned from ListWithCursor operation. -type ListRes struct { - addrList []objectcore.Info - cursor *Cursor -} - -// AddressList returns addresses selected by ListWithCursor operation. -func (l ListRes) AddressList() []objectcore.Info { - return l.addrList -} - -// Cursor returns cursor for consecutive listing requests. -func (l ListRes) Cursor() *Cursor { - return l.cursor -} - -// IterateOverContainersPrm contains parameters for IterateOverContainers operation. -type IterateOverContainersPrm struct { - // Handler function executed upon containers in db. - Handler func(context.Context, objectSDK.Type, cid.ID) error -} - -// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. 
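-// Only objects without a graveyard or garbage mark are passed to the handler.
-//
-// Usage sketch (mirrors the tests; db, ctx, cnr and addrs assumed in scope):
-//
-//	var prm IterateOverObjectsInContainerPrm
-//	prm.ContainerID = cnr
-//	prm.ObjectType = objectSDK.TypeRegular
-//	prm.Handler = func(ctx context.Context, info *objectcore.Info) error {
-//		addrs = append(addrs, info.Address)
-//		return nil
-//	}
-//	err := db.IterateOverObjectsInContainer(ctx, prm)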
-type IterateOverObjectsInContainerPrm struct { - // ObjectType type of objects to iterate over. - ObjectType objectSDK.Type - // ContainerID container for objects to iterate over. - ContainerID cid.ID - // Handler function executed upon objects in db. - Handler func(context.Context, *objectcore.Info) error -} - -// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. -type CountAliveObjectsInContainerPrm struct { - // ObjectType type of objects to iterate over. - ObjectType objectSDK.Type - // ContainerID container for objects to iterate over. - ContainerID cid.ID -} - -// ListWithCursor lists physical objects available in metabase starting from -// cursor. Includes objects of all types. Does not include inhumed and expired -// objects. -// Use cursor value from response for consecutive requests. -// -// Returns ErrEndOfListing if there are no more objects to return or count -// parameter is set to zero. -func (db *DB) ListWithCursor(ctx context.Context, prm ListPrm) (res ListRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("ListWithCursor", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.ListWithCursor", - trace.WithAttributes( - attribute.Int("count", prm.count), - attribute.Bool("has_cursor", prm.cursor != nil), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } - - result := make([]objectcore.Info, 0, prm.count) - - err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.addrList, res.cursor, err = db.listWithCursor(tx, result, prm.count, prm.cursor) - return err - }) - success = err == nil - return res, metaerr.Wrap(err) -} - -func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, cursor *Cursor) ([]objectcore.Info, *Cursor, error) { - threshold := cursor == nil // threshold is a flag to ignore cursor - var bucketName []byte - var err error - - c := tx.Cursor() - name, _ := c.First() - - if !threshold { - name, _ = c.Seek(cursor.bucketName) - } - - var containerID cid.ID - var offset []byte - bc := newBucketCache() - - rawAddr := make([]byte, cidSize, addressKeySize) - - currEpoch := db.epochState.CurrentEpoch() - -loop: - for ; name != nil; name, _ = c.Next() { - cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name) - if cidRaw == nil { - continue - } - - var objType objectSDK.Type - - switch prefix { - case primaryPrefix: - objType = objectSDK.TypeRegular - case lockersPrefix: - objType = objectSDK.TypeLock - case tombstonePrefix: - objType = objectSDK.TypeTombstone - default: - continue - } - - bkt := tx.Bucket(name) - if bkt != nil { - copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, - result, count, cursor, threshold, currEpoch) - if err != nil { - return nil, nil, err - } - } - bucketName = name - if len(result) >= count { - break loop - } - - // set threshold flag after first `selectNFromBucket` invocation - // first invocation must look for cursor object - threshold = true - } - - if offset != nil { - // new slice is much faster but less memory efficient - // we need to copy, because offset exists during bbolt tx - cursor.inBucketOffset = bytes.Clone(offset) - } - - if len(result) == 0 { - return nil, nil, ErrEndOfListing - } - - // new slice is much faster but less memory efficient - // we need to copy, because bucketName exists 
during bbolt tx - cursor.bucketName = bytes.Clone(bucketName) - - return result, cursor, nil -} - -// selectNFromBucket similar to selectAllFromBucket but uses cursor to find -// object to start selecting from. Ignores inhumed objects. -func selectNFromBucket( - bc *bucketCache, - bkt *bbolt.Bucket, // main bucket - objType objectSDK.Type, // type of the objects stored in the main bucket - cidRaw []byte, // container ID prefix, optimization - cnt cid.ID, // container ID - to []objectcore.Info, // listing result - limit int, // stop listing at `limit` items in result - cursor *Cursor, // start from cursor object - threshold bool, // ignore cursor and start immediately - currEpoch uint64, -) ([]objectcore.Info, []byte, *Cursor, error) { - if cursor == nil { - cursor = new(Cursor) - } - - c := bkt.Cursor() - k, v := c.First() - - offset := cursor.inBucketOffset - - if !threshold { - c.Seek(offset) - k, v = c.Next() // we are looking for objects _after_ the cursor - } - - for ; k != nil; k, v = c.Next() { - if len(to) >= limit { - break - } - - var obj oid.ID - if err := obj.Decode(k); err != nil { - break - } - - offset = k - graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) - garbageBkt := getGarbageBucket(bc, bkt.Tx()) - if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { - continue - } - - var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { - return nil, nil, nil, err - } - - expEpoch, hasExpEpoch := hasExpirationEpoch(&o) - if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { - continue - } - - var isLinkingObj bool - var ecInfo *objectcore.ECInfo - if objType == objectSDK.TypeRegular { - isLinkingObj = isLinkObject(&o) - ecHeader := o.ECHeader() - if ecHeader != nil { - ecInfo = &objectcore.ECInfo{ - ParentID: ecHeader.Parent(), - Index: ecHeader.Index(), - Total: ecHeader.Total(), - } - } - } - - var a oid.Address - a.SetContainer(cnt) - a.SetObject(obj) - to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) - } - - return to, offset, cursor, nil -} - -func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte) { - if len(name) < bucketKeySize { - return nil, 0 - } - - rawID := name[1:bucketKeySize] - - if err := containerID.Decode(rawID); err != nil { - return nil, 0 - } - - return rawID, name[0] -} - -// IterateOverContainers lists physical containers available in metabase starting from first. 
-func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers", - trace.WithAttributes( - attribute.Bool("has_handler", prm.Handler != nil), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - err := db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateOverContainers(ctx, tx, prm) - }) - success = err == nil - return metaerr.Wrap(err) -} - -func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error { - var containerID cid.ID - for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} { - c := tx.Cursor() - for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() { - cidRaw, _ := parseContainerIDWithPrefix(&containerID, name) - if cidRaw == nil { - continue - } - var cnt cid.ID - copy(cnt[:], containerID[:]) - var objType objectSDK.Type - switch prefix[0] { - case primaryPrefix: - objType = objectSDK.TypeRegular - case lockersPrefix: - objType = objectSDK.TypeLock - case tombstonePrefix: - objType = objectSDK.TypeTombstone - default: - continue - } - err := prm.Handler(ctx, objType, cnt) - if err != nil { - return err - } - } - } - - return nil -} - -// IterateOverObjectsInContainer iterate over physical objects available in metabase starting from first. -func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer", - trace.WithAttributes( - attribute.Bool("has_handler", prm.Handler != nil), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - err := db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateOverObjectsInContainer(ctx, tx, prm) - }) - success = err == nil - return metaerr.Wrap(err) -} - -func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error { - var prefix byte - switch prm.ObjectType { - case objectSDK.TypeRegular: - prefix = primaryPrefix - case objectSDK.TypeLock: - prefix = lockersPrefix - case objectSDK.TypeTombstone: - prefix = tombstonePrefix - default: - return nil - } - bucketName := []byte{prefix} - bucketName = append(bucketName, prm.ContainerID[:]...) 
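-	// bucket name layout: 1-byte type prefix followed by the raw container ID
-	// (the inverse of parseContainerIDWithPrefix)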
- - bkt := tx.Bucket(bucketName) - if bkt == nil { - return nil - } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) - c := bkt.Cursor() - k, v := c.First() - - for ; k != nil; k, v = c.Next() { - var obj oid.ID - if err := obj.Decode(k); err != nil { - break - } - - if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { - continue - } - - var isLinkingObj bool - var ecInfo *objectcore.ECInfo - if prm.ObjectType == objectSDK.TypeRegular { - var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { - return err - } - isLinkingObj = isLinkObject(&o) - ecHeader := o.ECHeader() - if ecHeader != nil { - ecInfo = &objectcore.ECInfo{ - ParentID: ecHeader.Parent(), - Index: ecHeader.Index(), - Total: ecHeader.Total(), - } - } - } - - var a oid.Address - a.SetContainer(prm.ContainerID) - a.SetObject(obj) - objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo} - err := prm.Handler(ctx, &objInfo) - if err != nil { - return err - } - } - return nil -} - -// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage. -func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return 0, ErrDegradedMode - } - - var prefix byte - switch prm.ObjectType { - case objectSDK.TypeRegular: - prefix = primaryPrefix - case objectSDK.TypeLock: - prefix = lockersPrefix - case objectSDK.TypeTombstone: - prefix = tombstonePrefix - default: - return 0, nil - } - bucketName := []byte{prefix} - bucketName = append(bucketName, prm.ContainerID[:]...) 
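-	// the read-only transaction below walks only bucket keys and skips
-	// records that already carry a graveyard or garbage mark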
- var count uint64 - err := db.boltDB.View(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(bucketName) - if bkt == nil { - return nil - } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) - c := bkt.Cursor() - k, _ := c.First() - for ; k != nil; k, _ = c.Next() { - if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { - continue - } - count++ - } - return nil - }) - success = err == nil - return count, metaerr.Wrap(err) -} diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go deleted file mode 100644 index 02985991c..000000000 --- a/pkg/local_object_storage/metabase/list_test.go +++ /dev/null @@ -1,304 +0,0 @@ -package meta_test - -import ( - "context" - "errors" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -func BenchmarkListWithCursor(b *testing.B) { - db := listWithCursorPrepareDB(b) - defer func() { require.NoError(b, db.Close(context.Background())) }() - - b.Run("1 item", func(b *testing.B) { - benchmarkListWithCursor(b, db, 1) - }) - b.Run("10 items", func(b *testing.B) { - benchmarkListWithCursor(b, db, 10) - }) - b.Run("100 items", func(b *testing.B) { - benchmarkListWithCursor(b, db, 100) - }) -} - -func listWithCursorPrepareDB(b *testing.B) *meta.DB { - db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ - NoSync: true, - })) // faster single-thread generation - - obj := testutil.GenerateObject() - for i := range 100_000 { // should be a multiple of all batch sizes - obj.SetID(oidtest.ID()) - if i%9 == 0 { // let's have 9 objects per container - obj.SetContainerID(cidtest.ID()) - } - require.NoError(b, putBig(db, obj)) - } - return db -} - -func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { - var prm meta.ListPrm - prm.SetCount(uint32(batchSize)) - - b.ResetTimer() - b.ReportAllocs() - for range b.N { - res, err := db.ListWithCursor(context.Background(), prm) - if err != nil { - if !errors.Is(err, meta.ErrEndOfListing) { - b.Fatalf("error: %v", err) - } - prm.SetCursor(nil) - } else if ln := len(res.AddressList()); ln != batchSize { - b.Fatalf("invalid batch size: %d", ln) - } else { - prm.SetCursor(res.Cursor()) - } - } -} - -func TestLisObjectsWithCursor(t *testing.T) { - t.Parallel() - - const ( - currEpoch = 100 - expEpoch = currEpoch - 1 - containers = 5 - total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired - ) - - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - expected := make([]object.Info, 0, total) - - // fill metabase with objects - for range containers { - containerID := cidtest.ID() - - // add one regular object - obj := testutil.GenerateObjectWithCID(containerID) - 
obj.SetType(objectSDK.TypeRegular) - err := putBig(db, obj) - require.NoError(t, err) - expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) - - // add one tombstone - obj = testutil.GenerateObjectWithCID(containerID) - obj.SetType(objectSDK.TypeTombstone) - err = putBig(db, obj) - require.NoError(t, err) - expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone}) - - // add one lock - obj = testutil.GenerateObjectWithCID(containerID) - obj.SetType(objectSDK.TypeLock) - err = putBig(db, obj) - require.NoError(t, err) - expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeLock}) - - // add one inhumed (do not include into expected) - obj = testutil.GenerateObjectWithCID(containerID) - obj.SetType(objectSDK.TypeRegular) - err = putBig(db, obj) - require.NoError(t, err) - ts := testutil.GenerateObjectWithCID(containerID) - err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object()) - require.NoError(t, err) - - // add one child object (do not include parent into expected) - splitID := objectSDK.NewSplitID() - parent := testutil.GenerateObjectWithCID(containerID) - testutil.AddAttribute(parent, "foo", "bar") - child := testutil.GenerateObjectWithCID(containerID) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - child.SetSplitID(splitID) - err = putBig(db, child) - require.NoError(t, err) - expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular}) - - // add expired object (do not include into expected) - obj = testutil.GenerateObjectWithCID(containerID) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) - require.NoError(t, metaPut(db, obj, nil)) - - // add non-expired object (include into expected) - obj = testutil.GenerateObjectWithCID(containerID) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch)) - require.NoError(t, metaPut(db, obj, nil)) - expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) - - // add locked expired object (include into expected) - obj = testutil.GenerateObjectWithCID(containerID) - objID := oidtest.ID() - obj.SetID(objID) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) - require.NoError(t, metaPut(db, obj, nil)) - require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID})) - expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) - } - - t.Run("success with various count", func(t *testing.T) { - for countPerReq := 1; countPerReq <= total; countPerReq++ { - got := make([]object.Info, 0, total) - - res, cursor, err := metaListWithCursor(db, uint32(countPerReq), nil) - require.NoError(t, err, "count:%d", countPerReq) - got = append(got, res...) - - expectedIterations := total / countPerReq - if total%countPerReq == 0 { // remove initial list if aligned - expectedIterations-- - } - - for range expectedIterations { - res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor) - require.NoError(t, err, "count:%d", countPerReq) - got = append(got, res...) 
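-				// each call resumes from the cursor of the previous response,
-				// so pages never overlap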
- } - - _, _, err = metaListWithCursor(db, uint32(countPerReq), cursor) - require.ErrorIs(t, err, meta.ErrEndOfListing, "count:%d", countPerReq, cursor) - require.ElementsMatch(t, expected, got, "count:%d", countPerReq) - } - }) - - t.Run("invalid count", func(t *testing.T) { - _, _, err := metaListWithCursor(db, 0, nil) - require.ErrorIs(t, err, meta.ErrEndOfListing) - }) -} - -func TestAddObjectDuringListingWithCursor(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const total = 5 - - expected := make(map[string]int, total) - - // fill metabase with objects - for range total { - obj := testutil.GenerateObject() - err := putBig(db, obj) - require.NoError(t, err) - expected[object.AddressOf(obj).EncodeToString()] = 0 - } - - // get half of the objects - got, cursor, err := metaListWithCursor(db, total/2, nil) - require.NoError(t, err) - for _, obj := range got { - if _, ok := expected[obj.Address.EncodeToString()]; ok { - expected[obj.Address.EncodeToString()]++ - } - } - - // add new objects - for range total { - obj := testutil.GenerateObject() - err = putBig(db, obj) - require.NoError(t, err) - } - - // get remaining objects - for { - got, cursor, err = metaListWithCursor(db, total, cursor) - if errors.Is(err, meta.ErrEndOfListing) { - break - } - for _, obj := range got { - if _, ok := expected[obj.Address.EncodeToString()]; ok { - expected[obj.Address.EncodeToString()]++ - } - } - } - - // check if all expected objects were fetched after database update - for _, v := range expected { - require.Equal(t, 1, v) - } -} - -func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]object.Info, *meta.Cursor, error) { - var listPrm meta.ListPrm - listPrm.SetCount(count) - listPrm.SetCursor(cursor) - - r, err := db.ListWithCursor(context.Background(), listPrm) - return r.AddressList(), r.Cursor(), err -} - -func TestIterateOver(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - const total uint64 = 5 - for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} { - var expected []*objectSDK.Object - // fill metabase with objects - cid := cidtest.ID() - for range total { - obj := testutil.GenerateObjectWithCID(cid) - obj.SetType(typ) - err := metaPut(db, obj, nil) - require.NoError(t, err) - expected = append(expected, obj) - } - - var metaIter meta.IterateOverObjectsInContainerPrm - var count uint64 - metaIter.Handler = func(context.Context, *object.Info) error { - count++ - return nil - } - metaIter.ContainerID = cid - metaIter.ObjectType = typ - err := db.IterateOverObjectsInContainer(context.Background(), metaIter) - require.NoError(t, err) - require.Equal(t, total, count) - - var metaCount meta.CountAliveObjectsInContainerPrm - metaCount.ContainerID = cid - metaCount.ObjectType = typ - res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount) - require.NoError(t, err) - require.Equal(t, res, total) - - err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1])) - require.NoError(t, err) - - res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount) - require.NoError(t, err) - require.Equal(t, uint64(3), res) - } - var count int - var metaPrm meta.IterateOverContainersPrm - metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error { - count++ - return nil - } - err := 
db.IterateOverContainers(context.Background(), metaPrm) - require.NoError(t, err) - require.Equal(t, 3, count) -} diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go deleted file mode 100644 index f4cb9e53b..000000000 --- a/pkg/local_object_storage/metabase/lock.go +++ /dev/null @@ -1,389 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "fmt" - "slices" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var bucketNameLocked = []byte{lockedPrefix} - -type keyValue struct { - Key []byte - Value []byte -} - -// returns name of the bucket with objects of type LOCK for specified container. -func bucketNameLockers(idCnr cid.ID, key []byte) []byte { - return bucketName(idCnr, lockersPrefix, key) -} - -// Lock marks objects as locked with another object. All objects are from the -// specified container. -// -// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject). -// -// Locked list should be unique. Panics if it is empty. -func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("Lock", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.Lock", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("locker", locker.EncodeToString()), - attribute.Int("locked_count", len(locked)), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } else if db.mode.ReadOnly() { - return ErrReadOnlyMode - } - - assert.False(len(locked) == 0, "empty locked list") - - err := db.lockInternal(locked, cnr, locker) - success = err == nil - return err -} - -func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error { - bucketKeysLocked := make([][]byte, len(locked)) - for i := range locked { - bucketKeysLocked[i] = objectKey(locked[i], make([]byte, objectKeySize)) - } - key := make([]byte, cidSize) - - return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error { - if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) 
!= objectSDK.TypeRegular { - return logicerr.Wrap(new(apistatus.LockNonRegularObject)) - } - - bucketLocked := tx.Bucket(bucketNameLocked) - - cnr.Encode(key) - bucketLockedContainer, err := bucketLocked.CreateBucketIfNotExists(key) - if err != nil { - return fmt.Errorf("create container bucket for locked objects %v: %w", cnr, err) - } - - keyLocker := objectKey(locker, key) - var exLockers [][]byte - var updLockers []byte - - loop: - for i := range bucketKeysLocked { - exLockers, err = decodeList(bucketLockedContainer.Get(bucketKeysLocked[i])) - if err != nil { - return fmt.Errorf("decode list of object lockers: %w", err) - } - - for i := range exLockers { - if bytes.Equal(exLockers[i], keyLocker) { - continue loop - } - } - - updLockers, err = encodeList(append(exLockers, keyLocker)) - if err != nil { - return fmt.Errorf("encode list of object lockers: %w", err) - } - - err = bucketLockedContainer.Put(bucketKeysLocked[i], updLockers) - if err != nil { - return fmt.Errorf("update list of object lockers: %w", err) - } - } - - return nil - })) -} - -// FreeLockedBy unlocks all objects in DB which are locked by lockers. -// Returns slice of unlocked object ID's or an error. -func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("FreeLockedBy", time.Since(startedAt), success) - }() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - var unlockedObjects []oid.Address - - if err := db.boltDB.Batch(func(tx *bbolt.Tx) error { - for i := range lockers { - unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object()) - if err != nil { - return err - } - unlockedObjects = append(unlockedObjects, unlocked...) - } - - return nil - }); err != nil { - return nil, metaerr.Wrap(err) - } - success = true - return unlockedObjects, nil -} - -// checks if specified object is locked in the specified container. -func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - return objectLockedWithCache(nil, tx, idCnr, idObj) -} - -func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := getLockedBucket(bc, tx) - if bucketLocked != nil { - key := make([]byte, cidSize) - idCnr.Encode(key) - bucketLockedContainer := bucketLocked.Bucket(key) - if bucketLockedContainer != nil { - return bucketLockedContainer.Get(objectKey(idObj, key)) != nil - } - } - - return false -} - -// return `LOCK` id's if specified object is locked in the specified container. -func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { - var lockers []oid.ID - bucketLocked := tx.Bucket(bucketNameLocked) - if bucketLocked != nil { - key := make([]byte, cidSize) - idCnr.Encode(key) - bucketLockedContainer := bucketLocked.Bucket(key) - if bucketLockedContainer != nil { - binObjIDs, err := decodeList(bucketLockedContainer.Get(objectKey(idObj, key))) - if err != nil { - return nil, fmt.Errorf("decode list of object lockers: %w", err) - } - for _, binObjID := range binObjIDs { - var id oid.ID - if err = id.Decode(binObjID); err != nil { - return nil, err - } - lockers = append(lockers, id) - } - } - } - return lockers, nil -} - -// releases all records about the objects locked by the locker. -// Returns slice of unlocked object ID's or an error. -// -// Operation is very resource-intensive, which is caused by the admissibility -// of multiple locks. 
Also, if we knew what objects are locked, it would be -// possible to speed up the execution. -func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Address, error) { - var unlockedObjects []oid.Address - bucketLocked := tx.Bucket(bucketNameLocked) - if bucketLocked == nil { - return unlockedObjects, nil - } - - key := make([]byte, cidSize) - idCnr.Encode(key) - - bucketLockedContainer := bucketLocked.Bucket(key) - if bucketLockedContainer == nil { - return unlockedObjects, nil - } - - keyLocker := objectKey(locker, key) - updates := make([]keyValue, 0) - err := bucketLockedContainer.ForEach(func(k, v []byte) error { - keyLockers, err := decodeList(v) - if err != nil { - return fmt.Errorf("decode list of lockers in locked bucket: %w", err) - } - - for i := range keyLockers { - if bytes.Equal(keyLockers[i], keyLocker) { - if len(keyLockers) == 1 { - updates = append(updates, keyValue{ - Key: k, - Value: nil, - }) - - var id oid.ID - err = id.Decode(k) - if err != nil { - return fmt.Errorf("decode unlocked object id: %w", err) - } - - var addr oid.Address - addr.SetContainer(idCnr) - addr.SetObject(id) - - unlockedObjects = append(unlockedObjects, addr) - } else { - // exclude locker - keyLockers = slices.Delete(keyLockers, i, i+1) - - v, err = encodeList(keyLockers) - if err != nil { - return fmt.Errorf("encode updated list of lockers: %w", err) - } - - updates = append(updates, keyValue{ - Key: k, - Value: v, - }) - } - - return nil - } - } - - return nil - }) - if err != nil { - return nil, err - } - - if err = applyBucketUpdates(bucketLockedContainer, updates); err != nil { - return nil, err - } - - return unlockedObjects, nil -} - -func applyBucketUpdates(bucket *bbolt.Bucket, updates []keyValue) error { - for _, update := range updates { - if update.Value == nil { - err := bucket.Delete(update.Key) - if err != nil { - return fmt.Errorf("delete locked object record from locked bucket: %w", err) - } - } else { - err := bucket.Put(update.Key, update.Value) - if err != nil { - return fmt.Errorf("update list of lockers: %w", err) - } - } - } - return nil -} - -// IsLockedPrm groups the parameters of IsLocked operation. -type IsLockedPrm struct { - addr oid.Address -} - -// SetAddress sets object address that will be checked for lock relations. -func (i *IsLockedPrm) SetAddress(addr oid.Address) { - i.addr = addr -} - -// IsLockedRes groups the resulting values of IsLocked operation. -type IsLockedRes struct { - locked bool -} - -// Locked describes the requested object status according to the metabase -// current state. -func (i IsLockedRes) Locked() bool { - return i.locked -} - -// IsLocked checks whether the provided object is locked by any `LOCK`. An object -// that is not found is considered non-locked. -// -// Returns only non-logical errors related to the underlying database.
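The implementation that follows is a read-only query. A minimal caller-side sketch (a compilable fragment added for this review, not part of the original change; the db setup is omitted), mirroring the TestDB_IsLocked test further down:

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// isLockedByAnyLock reports whether addr is protected by any LOCK object;
// an address that is not found in the metabase simply reads as "not locked".
func isLockedByAnyLock(db *meta.DB, addr oid.Address) (bool, error) {
	var prm meta.IsLockedPrm
	prm.SetAddress(addr)

	res, err := db.IsLocked(context.Background(), prm)
	if err != nil {
		return false, err // only storage-level (non-logical) errors end up here
	}
	return res.Locked(), nil
}
```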
-func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IsLocked", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } - err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res.locked = objectLocked(tx, prm.addr.Container(), prm.addr.Object()) - return nil - })) - success = err == nil - return res, err -} - -// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found -// object is considered as non-locked. -// -// Returns only non-logical errors related to underlying database. -func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks", - trace.WithAttributes( - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } - err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res, err = getLocks(tx, addr.Container(), addr.Object()) - return nil - })) - success = err == nil - return res, err -} diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go deleted file mode 100644 index 341ff9ad1..000000000 --- a/pkg/local_object_storage/metabase/lock_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" - "github.com/stretchr/testify/require" -) - -func TestDB_Lock(t *testing.T) { - t.Parallel() - - cnr := cidtest.ID() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - t.Run("empty locked list", func(t *testing.T) { - require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) }) - require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) }) - }) - - t.Run("(ir)regular", func(t *testing.T) { - for _, typ := range [...]objectSDK.Type{ - objectSDK.TypeTombstone, - objectSDK.TypeLock, - objectSDK.TypeRegular, - } { - obj := objecttest.Object() - obj.SetType(typ) - obj.SetContainerID(cnr) - - // save irregular object - err := metaPut(db, obj, nil) - require.NoError(t, err, typ) - - var e *apistatus.LockNonRegularObject - - id, _ := obj.ID() - - // try to lock it - err = db.Lock(context.Background(), cnr, oidtest.ID(), 
[]oid.ID{id}) - if typ == objectSDK.TypeRegular { - require.NoError(t, err, typ) - } else { - require.ErrorAs(t, err, &e, typ) - } - } - }) - - t.Run("removing lock object", func(t *testing.T) { - objs, lockObj := putAndLockObj(t, db, 1) - - objAddr := objectcore.AddressOf(objs[0]) - lockAddr := objectcore.AddressOf(lockObj) - - var inhumePrm meta.InhumePrm - inhumePrm.SetGCMark() - - // check locking relation - - var objLockedErr *apistatus.ObjectLocked - - inhumePrm.SetAddresses(objAddr) - _, err := db.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - tombAddr := oidtest.Address() - tombAddr.SetContainer(objAddr.Container()) - inhumePrm.SetTombstoneAddress(tombAddr) - _, err = db.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - // try to remove lock object - inhumePrm.SetAddresses(lockAddr) - _, err = db.Inhume(context.Background(), inhumePrm) - require.Error(t, err) - - // check that locking relation has not been - // dropped - - inhumePrm.SetAddresses(objAddr) - _, err = db.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - tombAddr = oidtest.Address() - tombAddr.SetContainer(objAddr.Container()) - inhumePrm.SetTombstoneAddress(tombAddr) - _, err = db.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - }) - - t.Run("lock-unlock scenario", func(t *testing.T) { - objs, lockObj := putAndLockObj(t, db, 1) - - objAddr := objectcore.AddressOf(objs[0]) - lockAddr := objectcore.AddressOf(lockObj) - - var objLockedErr *apistatus.ObjectLocked - - // try to inhume locked object using tombstone - err := metaInhume(db, objAddr, lockAddr.Object()) - require.ErrorAs(t, err, &objLockedErr) - - // free locked object - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(lockAddr) - inhumePrm.SetForceGCMark() - inhumePrm.SetLockObjectHandling() - - res, err := db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 1) - require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) - - _, err = db.FreeLockedBy([]oid.Address{lockAddr}) - require.NoError(t, err) - - inhumePrm.SetAddresses(objAddr) - inhumePrm.SetGCMark() - - // now we can inhume the object - _, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - }) - - t.Run("force removing lock objects", func(t *testing.T) { - const objsNum = 3 - - // put and lock `objsNum` objects - objs, lockObj := putAndLockObj(t, db, objsNum) - - // force remove objects - - var inhumePrm meta.InhumePrm - inhumePrm.SetForceGCMark() - inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) - inhumePrm.SetLockObjectHandling() - - res, err := db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 1) - require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) - - // unlock just objects that were locked by - // just removed locker - _, err = db.FreeLockedBy([]oid.Address{res.DeletedLockObjects()[0]}) - require.NoError(t, err) - - // removing objects after unlock - - inhumePrm.SetGCMark() - - for i := range objsNum { - inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) - - res, err = db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 0) - } - }) - - t.Run("skipping lock object handling", func(t *testing.T) { - _, lockObj := putAndLockObj(t, db, 1) - - var inhumePrm meta.InhumePrm - inhumePrm.SetForceGCMark() - 
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) - - res, err := db.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 0) - }) -} - -func TestDB_Lock_Expired(t *testing.T) { - t.Parallel() - - es := &epochState{e: 123} - - db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - // put an object - addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124) - - // expire the obj - es.e = 125 - _, err := metaGet(db, addr, false) - require.ErrorIs(t, err, meta.ErrObjectIsExpired) - - // lock the obj - require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()})) - - // object is expired but locked, thus, must be available - _, err = metaGet(db, addr, false) - require.NoError(t, err) -} - -func TestDB_IsLocked(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - // existing and locked objs - - objs, _ := putAndLockObj(t, db, 5) - var prm meta.IsLockedPrm - - for _, obj := range objs { - prm.SetAddress(objectcore.AddressOf(obj)) - - res, err := db.IsLocked(context.Background(), prm) - require.NoError(t, err) - - require.True(t, res.Locked()) - } - - // some rand obj - - prm.SetAddress(oidtest.Address()) - - res, err := db.IsLocked(context.Background(), prm) - require.NoError(t, err) - - require.False(t, res.Locked()) - - // existing but not locked obj - - obj := objecttest.Object() - - var putPrm meta.PutPrm - putPrm.SetObject(obj) - - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - prm.SetAddress(objectcore.AddressOf(obj)) - - res, err = db.IsLocked(context.Background(), prm) - require.NoError(t, err) - - require.False(t, res.Locked()) -} - -// putAndLockObj puts object, returns it and its locker. 
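The helper below is the backbone of these tests. A short usage sketch (a hypothetical test body, assuming the package's newDB fixture) of what callers get back:

```go
// Store three regular objects plus one LOCK object covering all of them,
// all in a fresh random container; then confirm the lock relation exists.
objs, lockObj := putAndLockObj(t, db, 3)

var prm meta.IsLockedPrm
prm.SetAddress(objectcore.AddressOf(objs[0]))

res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Locked())

// The locker itself is stored as an ordinary metabase record of type LOCK.
require.Equal(t, objectSDK.TypeLock, lockObj.Type())
```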
-func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK.Object, *objectSDK.Object) { - cnr := cidtest.ID() - - lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs) - lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs) - - for range numOfLockedObjs { - obj := testutil.GenerateObjectWithCID(cnr) - err := putBig(db, obj) - require.NoError(t, err) - - id, _ := obj.ID() - - lockedObjs = append(lockedObjs, obj) - lockedObjIDs = append(lockedObjIDs, id) - } - - lockObj := testutil.GenerateObjectWithCID(cnr) - lockID, _ := lockObj.ID() - lockObj.SetType(objectSDK.TypeLock) - - err := putBig(db, lockObj) - require.NoError(t, err) - - err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs) - require.NoError(t, err) - - return lockedObjs, lockObj -} diff --git a/pkg/local_object_storage/metabase/metrics.go b/pkg/local_object_storage/metabase/metrics.go deleted file mode 100644 index d673560c7..000000000 --- a/pkg/local_object_storage/metabase/metrics.go +++ /dev/null @@ -1,23 +0,0 @@ -package meta - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -type Metrics interface { - SetParentID(parentID string) - - SetMode(m mode.ComponentMode) - Close() - - AddMethodDuration(method string, d time.Duration, success bool) -} - -type noopMetrics struct{} - -func (m *noopMetrics) SetParentID(string) {} -func (m *noopMetrics) SetMode(mode.ComponentMode) {} -func (m *noopMetrics) Close() {} -func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {} diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go deleted file mode 100644 index 7edb96384..000000000 --- a/pkg/local_object_storage/metabase/mode.go +++ /dev/null @@ -1,41 +0,0 @@ -package meta - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -// SetMode sets the metabase mode of operation. -// If the mode assumes no operation metabase, the database is closed. -func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { - db.modeMtx.Lock() - defer db.modeMtx.Unlock() - - if db.mode == m { - return nil - } - - if !db.mode.NoMetabase() { - if err := db.Close(ctx); err != nil { - return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) - } - } - - if m.NoMetabase() { - db.boltDB = nil - } else { - err := db.openDB(ctx, m) - if err == nil && !m.ReadOnly() { - err = db.Init(ctx) - } - if err != nil { - return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) - } - } - - db.mode = m - db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) - return nil -} diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go deleted file mode 100644 index 28b42283f..000000000 --- a/pkg/local_object_storage/metabase/mode_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package meta - -import ( - "context" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -type epochStateTest struct{} - -func (s epochStateTest) CurrentEpoch() uint64 { - return 0 -} - -func Test_Mode(t *testing.T) { - t.Parallel() - bdb := New([]Option{ - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochStateTest{}), - }...) 
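// Editorial note on the assertions that follow (an observation for review,
// not part of the original change): both degraded modes skip opening bolt
// entirely, so boltDB stays nil and Open/Init/Close must all succeed as
// no-ops; this pins down the SetMode contract from mode.go above.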
- - require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly)) - require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init(context.Background())) - require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close(context.Background())) - - require.NoError(t, bdb.Open(context.Background(), mode.Degraded)) - require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init(context.Background())) - require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close(context.Background())) -} diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go deleted file mode 100644 index 5e1bbfe9e..000000000 --- a/pkg/local_object_storage/metabase/put.go +++ /dev/null @@ -1,660 +0,0 @@ -package meta - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - gio "io" - "strconv" - "time" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/nspcc-dev/neo-go/pkg/io" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type ( - namedBucketItem struct { - name, key, val []byte - } -) - -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - obj *objectSDK.Object - - id []byte - - indexAttributes bool -} - -// PutRes groups the resulting values of Put operation. -type PutRes struct { - Inserted bool -} - -// SetObject is a Put option to set object to save. -func (p *PutPrm) SetObject(obj *objectSDK.Object) { - p.obj = obj -} - -// SetStorageID is a Put option to set storage ID to save. -func (p *PutPrm) SetStorageID(id []byte) { - p.id = id -} - -func (p *PutPrm) SetIndexAttributes(v bool) { - p.indexAttributes = v -} - -var ( - ErrUnknownObjectType = errors.New("unknown object type") - ErrIncorrectRootObject = errors.New("invalid root object") -) - -// Put saves object header in metabase. Object payload expected to be cut. -// -// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard. -// Returns the object.ErrObjectIsExpired if the object is presented but already expired. 
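A caller-side sketch of the contract documented above (a hypothetical compilable fragment; it simply mirrors the metaPut helper from put_test.go further down):

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// storeHeader saves an object header (payload already cut) together with
// the storage ID that records where the blob actually lives.
func storeHeader(db *meta.DB, obj *objectSDK.Object, storageID []byte) (bool, error) {
	var prm meta.PutPrm
	prm.SetObject(obj)
	prm.SetStorageID(storageID)

	res, err := db.Put(context.Background(), prm)
	if err != nil {
		// err may be apistatus.ObjectAlreadyRemoved or object.ErrObjectIsExpired,
		// as the doc comment above states.
		return false, err
	}
	return res.Inserted, nil
}
```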
-func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("Put", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.Put", - trace.WithAttributes( - attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } else if db.mode.ReadOnly() { - return res, ErrReadOnlyMode - } - - currEpoch := db.epochState.CurrentEpoch() - - err = db.boltDB.Batch(func(tx *bbolt.Tx) error { - var e error - res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes) - return e - }) - if err == nil { - success = true - storagelog.Write(ctx, db.log, - storagelog.AddressField(objectCore.AddressOf(prm.obj)), - storagelog.OpField("metabase PUT")) - } - - return res, metaerr.Wrap(err) -} - -func (db *DB) put(tx *bbolt.Tx, - obj *objectSDK.Object, - id []byte, - si *objectSDK.SplitInfo, - currEpoch uint64, - indexAttributes bool, -) (PutRes, error) { - cnr, ok := obj.ContainerID() - if !ok { - return PutRes{}, errors.New("missing container in object") - } - - var ecParentAddress oid.Address - if ecHeader := obj.ECHeader(); ecHeader != nil { - ecParentAddress.SetContainer(cnr) - ecParentAddress.SetObject(ecHeader.Parent()) - } - - isParent := si != nil - - exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch) - - var splitInfoError *objectSDK.SplitInfoError - if errors.As(err, &splitInfoError) { - exists = true // object exists, however it is virtual - } else if err != nil { - return PutRes{}, err // return any error besides SplitInfoError - } - - if exists { - return PutRes{}, db.updateObj(tx, obj, id, si, isParent) - } - - return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes) -} - -func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error { - // most right child and split header overlap parent so we have to - // check if object exists to not overwrite it twice - - // When storage engine moves objects between different sub-storages, - // it calls metabase.Put method with new storage ID, thus triggering this code. 
- if !isParent && id != nil { - return setStorageID(tx, objectCore.AddressOf(obj), id, true) - } - - // when storage already has last object in split hierarchy and there is - // a linking object to put (or vice versa), we should update split info - // with object ids of these objects - if isParent { - return updateSplitInfo(tx, objectCore.AddressOf(obj), si) - } - - return nil -} - -func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error { - if par := obj.Parent(); par != nil && !isParent { // limit depth by two - parentSI, err := splitInfoFromObject(obj) - if err != nil { - return err - } - - _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes) - if err != nil { - return err - } - } - - err := putUniqueIndexes(tx, obj, si, id) - if err != nil { - return fmt.Errorf("put unique indexes: %w", err) - } - - err = updateListIndexes(tx, obj, putListIndexItem) - if err != nil { - return fmt.Errorf("put list indexes: %w", err) - } - - if indexAttributes { - err = updateFKBTIndexes(tx, obj, putFKBTIndexItem) - if err != nil { - return fmt.Errorf("put fake bucket tree indexes: %w", err) - } - } - - // update container volume size estimation - if obj.Type() == objectSDK.TypeRegular && !isParent { - err = changeContainerSize(tx, cnr, obj.PayloadSize(), true) - if err != nil { - return err - } - } - - if !isParent { - if err = db.incCounters(tx, cnr, IsUserObject(obj)); err != nil { - return err - } - } - - return nil -} - -func putUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, si *objectSDK.SplitInfo, id []byte) error { - isParent := si != nil - addr := objectCore.AddressOf(obj) - objKey := objectKey(addr.Object(), make([]byte, objectKeySize)) - - bucketName := make([]byte, bucketKeySize) - if !isParent { - err := putRawObjectData(tx, obj, bucketName, addr, objKey) - if err != nil { - return err - } - if id != nil { - if err = setStorageID(tx, objectCore.AddressOf(obj), id, false); err != nil { - return err - } - } - } - - if err := putExpirationEpoch(tx, obj, addr, objKey); err != nil { - return err - } - - return putSplitInfo(tx, obj, bucketName, addr, si, objKey) -} - -func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, objKey []byte) error { - switch obj.Type() { - case objectSDK.TypeRegular: - bucketName = primaryBucketName(addr.Container(), bucketName) - case objectSDK.TypeTombstone: - bucketName = tombstoneBucketName(addr.Container(), bucketName) - case objectSDK.TypeLock: - bucketName = bucketNameLockers(addr.Container(), bucketName) - default: - return ErrUnknownObjectType - } - rawObject, err := obj.CutPayload().Marshal() - if err != nil { - return fmt.Errorf("marshal object header: %w", err) - } - return putUniqueIndexItem(tx, namedBucketItem{ - name: bucketName, - key: objKey, - val: rawObject, - }) -} - -func putExpirationEpoch(tx *bbolt.Tx, obj *objectSDK.Object, addr oid.Address, objKey []byte) error { - if expEpoch, ok := hasExpirationEpoch(obj); ok { - err := putUniqueIndexItem(tx, namedBucketItem{ - name: expEpochToObjectBucketName, - key: expirationEpochKey(expEpoch, addr.Container(), addr.Object()), - val: zeroValue, - }) - if err != nil { - return err - } - val := make([]byte, epochSize) - binary.LittleEndian.PutUint64(val, expEpoch) - err = putUniqueIndexItem(tx, namedBucketItem{ - name: objectToExpirationEpochBucketName(addr.Container(), make([]byte, bucketKeySize)), - key: objKey, - val: val, - }) - if 
err != nil { - return err - } - } - return nil -} - -func putSplitInfo(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, si *objectSDK.SplitInfo, objKey []byte) error { - if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() { - if ecHead := obj.ECHeader(); ecHead != nil { - parentID := ecHead.Parent() - if ecHead.ParentSplitID() != nil { - parentSplitParentID := ecHead.ParentSplitParentID() - if parentSplitParentID == nil { - return nil - } - - si = objectSDK.NewSplitInfo() - si.SetSplitID(ecHead.ParentSplitID()) - si.SetLastPart(ecHead.Parent()) - - parentID = *parentSplitParentID - } - objKey = objectKey(parentID, objKey) - } - return updateSplitInfoIndex(tx, objKey, addr.Container(), bucketName, si) - } - return nil -} - -func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName []byte, si *objectSDK.SplitInfo) error { - return updateUniqueIndexItem(tx, namedBucketItem{ - name: rootBucketName(cnr, bucketName), - key: objKey, - }, func(old, _ []byte) ([]byte, error) { - switch { - case si == nil && old == nil: - return []byte{}, nil - case si == nil: - return old, nil - case old == nil: - return si.Marshal() - default: - oldSI := objectSDK.NewSplitInfo() - if err := oldSI.Unmarshal(old); err != nil { - return nil, err - } - si = util.MergeSplitInfo(si, oldSI) - return si.Marshal() - } - }) -} - -type updateIndexItemFunc = func(tx *bbolt.Tx, item namedBucketItem) error - -func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error { - idObj, _ := obj.ID() - cnr, _ := obj.ContainerID() - objKey := objectKey(idObj, make([]byte, objectKeySize)) - bucketName := make([]byte, bucketKeySize) - - idParent, ok := obj.ParentID() - - // index parent ids - if ok { - err := f(tx, namedBucketItem{ - name: parentBucketName(cnr, bucketName), - key: objectKey(idParent, make([]byte, objectKeySize)), - val: objKey, - }) - if err != nil { - return err - } - } - - // index split ids - if obj.SplitID() != nil { - err := f(tx, namedBucketItem{ - name: splitBucketName(cnr, bucketName), - key: obj.SplitID().ToV2(), - val: objKey, - }) - if err != nil { - return err - } - } - - if ech := obj.ECHeader(); ech != nil { - err := f(tx, namedBucketItem{ - name: ecInfoBucketName(cnr, bucketName), - key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - val: objKey, - }) - if err != nil { - return err - } - - if ech.ParentSplitID() != nil { - objKey := objectKey(ech.Parent(), make([]byte, objectKeySize)) - err := f(tx, namedBucketItem{ - name: splitBucketName(cnr, bucketName), - key: ech.ParentSplitID().ToV2(), - val: objKey, - }) - if err != nil { - return err - } - } - - if parentSplitParentID := ech.ParentSplitParentID(); parentSplitParentID != nil { - objKey := objectKey(ech.Parent(), make([]byte, objectKeySize)) - err := f(tx, namedBucketItem{ - name: parentBucketName(cnr, bucketName), - key: objectKey(*parentSplitParentID, make([]byte, objectKeySize)), - val: objKey, - }) - if err != nil { - return err - } - } - } - - return nil -} - -var indexedAttributes = map[string]struct{}{ - "S3-Access-Box-CRDT-Name": {}, - objectSDK.AttributeFilePath: {}, -} - -// IsAtrributeIndexed returns True if attribute is indexed by metabase. 
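Only the two keys in the indexedAttributes map above are index-backed; every other attribute falls through to the slow, header-unmarshaling filter path. A tiny illustration of the predicate defined next (fragment; the results follow directly from the map):

```go
_ = meta.IsAtrributeIndexed(objectSDK.AttributeFilePath) // true: FKBT-indexed
_ = meta.IsAtrributeIndexed("S3-Access-Box-CRDT-Name")   // true: FKBT-indexed
_ = meta.IsAtrributeIndexed("Author")                    // false: slow-filter path
```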
-func IsAtrributeIndexed(attr string) bool { - _, found := indexedAttributes[attr] - return found -} - -func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error { - id, _ := obj.ID() - cnr, _ := obj.ContainerID() - objKey := objectKey(id, make([]byte, objectKeySize)) - - key := make([]byte, bucketKeySize) - var attrs []objectSDK.Attribute - if obj.ECHeader() != nil { - attrs = obj.ECHeader().ParentAttributes() - objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize)) - } else { - attrs = obj.Attributes() - } - - // user specified attributes - for i := range attrs { - if !IsAtrributeIndexed(attrs[i].Key()) { - continue - } - key = attributeBucketName(cnr, attrs[i].Key(), key) - err := f(tx, namedBucketItem{ - name: key, - key: []byte(attrs[i].Value()), - val: objKey, - }) - if err != nil { - return err - } - } - - return nil -} - -func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) { - attributes := obj.Attributes() - if ech := obj.ECHeader(); ech != nil { - attributes = ech.ParentAttributes() - } - for _, attr := range attributes { - if attr.Key() == objectV2.SysAttributeExpEpoch { - expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64) - return expEpoch, err == nil - } - } - return 0, false -} - -type bucketContainer interface { - Bucket([]byte) *bbolt.Bucket - CreateBucket([]byte) (*bbolt.Bucket, error) - CreateBucketIfNotExists([]byte) (*bbolt.Bucket, error) -} - -func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Bucket, error) { - if bkt := tx.Bucket(name); bkt != nil { - return bkt, nil - } - return tx.CreateBucket(name) -} - -func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error { - bkt, err := createBucketLikelyExists(tx, item.name) - if err != nil { - return fmt.Errorf("create index %v: %w", item.name, err) - } - - data, err := update(bkt.Get(item.key), item.val) - if err != nil { - return err - } - return bkt.Put(item.key, data) -} - -func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil }) -} - -func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt, err := createBucketLikelyExists(tx, item.name) - if err != nil { - return fmt.Errorf("create index %v: %w", item.name, err) - } - - fkbtRoot, err := createBucketLikelyExists(bkt, item.key) - if err != nil { - return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err) - } - - return fkbtRoot.Put(item.val, zeroValue) -} - -func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { - bkt, err := createBucketLikelyExists(tx, item.name) - if err != nil { - return fmt.Errorf("create index %v: %w", item.name, err) - } - - lst, err := decodeList(bkt.Get(item.key)) - if err != nil { - return fmt.Errorf("decode leaf list %v: %w", item.key, err) - } - - lst = append(lst, item.val) - - encodedLst, err := encodeList(lst) - if err != nil { - return fmt.Errorf("encode leaf list %v: %w", item.key, err) - } - - return bkt.Put(item.key, encodedLst) -} - -// encodeList encodes a list of byte slices into a single blob for list bucket indexes. -func encodeList(lst [][]byte) ([]byte, error) { - w := io.NewBufBinWriter() - w.WriteVarUint(uint64(len(lst))) - for i := range lst { - w.WriteVarBytes(lst[i]) - } - if w.Err != nil { - return nil, w.Err - } - return w.Bytes(), nil -} - -// decodeList decodes a blob into a list of byte slices for a list bucket index.
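decodeList, next, together with getVarUint pins down the on-disk layout: a NEO-style variable-length count followed by length-prefixed items. A standalone sketch of that layout with a hand-rolled encoder mirroring getVarUint (illustration only; the real encodeList above relies on neo-go's io.BufBinWriter for the same framing):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// putVarUint mirrors getVarUint below: values under 0xfd are one raw byte;
// larger values get a 0xfd/0xfe/0xff tag followed by a little-endian
// 16/32/64-bit integer.
func putVarUint(buf []byte, v uint64) []byte {
	switch {
	case v < 0xfd:
		return append(buf, byte(v))
	case v <= 0xffff:
		buf = append(buf, 0xfd, 0, 0)
		binary.LittleEndian.PutUint16(buf[len(buf)-2:], uint16(v))
		return buf
	case v <= 0xffffffff:
		buf = append(buf, 0xfe, 0, 0, 0, 0)
		binary.LittleEndian.PutUint32(buf[len(buf)-4:], uint32(v))
		return buf
	default:
		buf = append(buf, 0xff, 0, 0, 0, 0, 0, 0, 0, 0)
		binary.LittleEndian.PutUint64(buf[len(buf)-8:], v)
		return buf
	}
}

func main() {
	lst := [][]byte{[]byte("a"), []byte("bc")}
	blob := putVarUint(nil, uint64(len(lst))) // item count
	for _, item := range lst {
		blob = putVarUint(blob, uint64(len(item))) // item length
		blob = append(blob, item...)               // item bytes
	}
	fmt.Printf("% x\n", blob) // 02 01 61 02 62 63
}
```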
-func decodeList(data []byte) (lst [][]byte, err error) { - if len(data) == 0 { - return nil, nil - } - - var offset uint64 - size, n, err := getVarUint(data) - if err != nil { - return nil, err - } - - offset += uint64(n) - lst = make([][]byte, size, size+1) - for i := range lst { - sz, n, err := getVarUint(data[offset:]) - if err != nil { - return nil, err - } - offset += uint64(n) - - next := offset + sz - if uint64(len(data)) < next { - return nil, gio.ErrUnexpectedEOF - } - lst[i] = data[offset:next] - offset = next - } - return lst, nil -} - -func getVarUint(data []byte) (uint64, int, error) { - if len(data) == 0 { - return 0, 0, gio.ErrUnexpectedEOF - } - - switch b := data[0]; b { - case 0xfd: - if len(data) < 3 { - return 0, 1, gio.ErrUnexpectedEOF - } - return uint64(binary.LittleEndian.Uint16(data[1:])), 3, nil - case 0xfe: - if len(data) < 5 { - return 0, 1, gio.ErrUnexpectedEOF - } - return uint64(binary.LittleEndian.Uint32(data[1:])), 5, nil - case 0xff: - if len(data) < 9 { - return 0, 1, gio.ErrUnexpectedEOF - } - return binary.LittleEndian.Uint64(data[1:]), 9, nil - default: - return uint64(b), 1, nil - } -} - -// setStorageID updates the storage ID for existing objects if they were moved from one -// storage location to another. -func setStorageID(tx *bbolt.Tx, addr oid.Address, id []byte, override bool) error { - key := make([]byte, bucketKeySize) - bkt, err := createBucketLikelyExists(tx, smallBucketName(addr.Container(), key)) - if err != nil { - return err - } - key = objectKey(addr.Object(), key) - if override || bkt.Get(key) == nil { - return bkt.Put(key, id) - } - return nil -} - -// updateSplitInfo updates split info for existing objects if the storage is filled with extra -// information about the last object in a split hierarchy or a linking object. -func updateSplitInfo(tx *bbolt.Tx, addr oid.Address, from *objectSDK.SplitInfo) error { - objKey := objectKey(addr.Object(), make([]byte, bucketKeySize)) - return updateSplitInfoIndex(tx, objKey, addr.Container(), make([]byte, bucketKeySize), from) -} - -// splitInfoFromObject returns split info based on the last or linking object. -// Otherwise returns nil, nil. -func splitInfoFromObject(obj *objectSDK.Object) (*objectSDK.SplitInfo, error) { - if obj.Parent() == nil { - return nil, nil - } - - info := objectSDK.NewSplitInfo() - info.SetSplitID(obj.SplitID()) - - switch { - case isLinkObject(obj): - id, ok := obj.ID() - if !ok { - return nil, errors.New("missing object ID") - } - - info.SetLink(id) - case isLastObject(obj): - id, ok := obj.ID() - if !ok { - return nil, errors.New("missing object ID") - } - - info.SetLastPart(id) - default: - return nil, ErrIncorrectRootObject // should never happen - } - - return info, nil -} - -// isLinkObject returns true if the object contains a parent header and a list -// of children. -func isLinkObject(obj *objectSDK.Object) bool { - return len(obj.Children()) > 0 && obj.Parent() != nil -} - -// isLastObject returns true if the object contains only a parent header without a list -// of children.
-func isLastObject(obj *objectSDK.Object) bool { - return len(obj.Children()) == 0 && obj.Parent() != nil -} diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go deleted file mode 100644 index f37ed4cf2..000000000 --- a/pkg/local_object_storage/metabase/put_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package meta_test - -import ( - "context" - "runtime" - "strconv" - "sync/atomic" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func prepareObjects(n int) []*objectSDK.Object { - cnr := cidtest.ID() - parentID := objecttest.ID() - objs := make([]*objectSDK.Object, n) - for i := range objs { - objs[i] = testutil.GenerateObjectWithCID(cnr) - - // FKBT indices. - attrs := make([]objectSDK.Attribute, 20) - for j := range attrs { - attrs[j].SetKey("abc" + strconv.FormatUint(rand.Uint64()%4, 16)) - attrs[j].SetValue("xyz" + strconv.FormatUint(rand.Uint64()%4, 16)) - } - objs[i].SetAttributes(attrs...) - - // List indices. - if i%2 == 0 { - objs[i].SetParentID(parentID) - } - } - return objs -} - -func BenchmarkPut(b *testing.B) { - b.Run("parallel", func(b *testing.B) { - db := newDB(b, - meta.WithMaxBatchDelay(time.Millisecond*10), - meta.WithMaxBatchSize(runtime.NumCPU())) - defer func() { require.NoError(b, db.Close(context.Background())) }() - // Ensure the benchmark is bound by CPU and not waiting batch-delay time. 
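// Editorial note on the next line (an assumption about Go testing semantics,
// not part of the original change): SetParallelism(p) makes RunParallel use
// p*GOMAXPROCS goroutines, so p=1 saturates the CPUs without oversubscribing
// them, keeping the measurement CPU-bound rather than batch-delay-bound.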
- b.SetParallelism(1) - - var index atomic.Int64 - index.Store(-1) - - objs := prepareObjects(b.N) - b.ResetTimer() - b.ReportAllocs() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if err := metaPut(db, objs[index.Add(1)], nil); err != nil { - b.Fatal(err) - } - } - }) - }) - b.Run("sequential", func(b *testing.B) { - db := newDB(b, - meta.WithMaxBatchDelay(time.Millisecond*10), - meta.WithMaxBatchSize(1)) - defer func() { require.NoError(b, db.Close(context.Background())) }() - var index atomic.Int64 - index.Store(-1) - objs := prepareObjects(b.N) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - if err := metaPut(db, objs[index.Add(1)], nil); err != nil { - b.Fatal(err) - } - } - }) -} - -func TestDB_PutBlobovniczaUpdate(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - raw1 := testutil.GenerateObject() - storageID := []byte{1, 2, 3, 4} - - // put one object with storageID - err := metaPut(db, raw1, storageID) - require.NoError(t, err) - - fetchedStorageID, err := metaStorageID(db, object.AddressOf(raw1)) - require.NoError(t, err) - require.Equal(t, storageID, fetchedStorageID) - - t.Run("update storageID", func(t *testing.T) { - newID := []byte{5, 6, 7, 8} - - err := metaPut(db, raw1, newID) - require.NoError(t, err) - - fetchedBlobovniczaID, err := metaStorageID(db, object.AddressOf(raw1)) - require.NoError(t, err) - require.Equal(t, newID, fetchedBlobovniczaID) - }) - - t.Run("update storageID on bad object", func(t *testing.T) { - raw2 := testutil.GenerateObject() - err := putBig(db, raw2) - require.NoError(t, err) - - fetchedBlobovniczaID, err := metaStorageID(db, object.AddressOf(raw2)) - require.NoError(t, err) - require.Nil(t, fetchedBlobovniczaID) - }) -} - -func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error { - var putPrm meta.PutPrm - putPrm.SetObject(obj) - putPrm.SetStorageID(id) - - _, err := db.Put(context.Background(), putPrm) - - return err -} diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go deleted file mode 100644 index 5f0956f0b..000000000 --- a/pkg/local_object_storage/metabase/reset_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package meta - -import ( - "context" - "fmt" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -type epochState struct{ e uint64 } - -func (s epochState) CurrentEpoch() uint64 { - return s.e -} - -func TestResetDropsContainerBuckets(t *testing.T) { - t.Parallel() - - db := New( - []Option{ - WithPath(filepath.Join(t.TempDir(), "metabase")), - WithPermissions(0o600), - WithEpochState(epochState{}), - }..., - ) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - - defer func() { require.NoError(t, db.Close(context.Background())) }() - - for idx := range 100 { - var putPrm PutPrm - putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) - _, err := db.Put(context.Background(), putPrm) - require.NoError(t, err) - } - - require.NoError(t, db.Reset()) - - var bucketCount int - require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { - return tx.ForEach(func(name []byte, b *bbolt.Bucket) error { - _, exists := mStaticBuckets[string(name)] - require.True(t, exists, 
"unexpected bucket:"+string(name)) - bucketCount++ - return nil - }) - })) - require.Equal(t, len(mStaticBuckets), bucketCount) -} diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go deleted file mode 100644 index 60da50671..000000000 --- a/pkg/local_object_storage/metabase/select.go +++ /dev/null @@ -1,614 +0,0 @@ -package meta - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type ( - // filterGroup is a structure that have search filters grouped by access - // method. We have fast filters that looks for indexes and do not unmarshal - // objects, and slow filters, that applied after fast filters created - // smaller set of objects to check. - filterGroup struct { - withCnrFilter bool - - cnr cid.ID - - fastFilters, slowFilters objectSDK.SearchFilters - } -) - -// SelectPrm groups the parameters of Select operation. -type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters - useAttributeIndex bool -} - -// SelectRes groups the resulting values of Select operation. -type SelectRes struct { - addrList []oid.Address -} - -// SetContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) SetContainerID(cnr cid.ID) { - p.cnr = cnr -} - -// SetFilters is a Select option to set the object filters. -func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) { - p.filters = fs -} - -func (p *SelectPrm) SetUseAttributeIndex(v bool) { - p.useAttributeIndex = v -} - -// AddressList returns list of addresses of the selected objects. -func (r SelectRes) AddressList() []oid.Address { - return r.addrList -} - -// Select returns list of addresses of objects that match search filters. -func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("Select", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.Select", - trace.WithAttributes( - attribute.String("container_id", prm.cnr.EncodeToString()), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } - - if checkNonEmpty(prm.filters) { - success = true - return res, nil - } - - currEpoch := db.epochState.CurrentEpoch() - - return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex) - success = err == nil - return err - })) -} - -func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) { - group, err := groupFilters(fs, useAttributeIndex) - if err != nil { - return nil, err - } - - // if there are conflicts in query and container then it means that there is no - // objects to match this query. 
- if group.withCnrFilter && !cnr.Equals(group.cnr) { - return nil, nil - } - - // keep matched addresses in this cache - // value equal to number (index+1) of latest matched filter - mAddr := make(map[string]int) - - expLen := len(group.fastFilters) // expected value of matched filters in mAddr - - if len(group.fastFilters) == 0 { - expLen = 1 - - db.selectAll(tx, cnr, mAddr) - } else { - for i := range group.fastFilters { - db.selectFastFilter(tx, cnr, group.fastFilters[i], mAddr, i) - } - } - - res := make([]oid.Address, 0, len(mAddr)) - - bc := newBucketCache() - for a, ind := range mAddr { - if ind != expLen { - continue // ignore objects with unmatched fast filters - } - - var id oid.ID - err = id.Decode([]byte(a)) - if err != nil { - return nil, err - } - - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(id) - st, err := objectStatusWithCache(bc, tx, addr, currEpoch) - if err != nil { - return nil, err - } - if st > 0 { - continue // ignore removed objects - } - - addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) - if !match { - continue // ignore objects with unmatched slow filters - } - - res = append(res, addr) - } - - return res, nil -} - -// selectAll adds to resulting cache all available objects in metabase. -func (db *DB) selectAll(tx *bbolt.Tx, cnr cid.ID, to map[string]int) { - bucketName := make([]byte, bucketKeySize) - selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, 0) - selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, 0) - selectAllFromBucket(tx, parentBucketName(cnr, bucketName), to, 0) - selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, 0) -} - -// selectAllFromBucket goes through all keys in bucket and adds them in a -// resulting cache. Keys should be stringed object ids. -func selectAllFromBucket(tx *bbolt.Tx, name []byte, to map[string]int, fNum int) { - bkt := tx.Bucket(name) - if bkt == nil { - return - } - - _ = bkt.ForEach(func(k, _ []byte) error { - markAddressInCache(to, fNum, string(k)) - - return nil - }) -} - -// selectFastFilter makes fast optimized checks for well known buckets or -// looking through user attribute buckets otherwise. 
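Before the per-filter dispatch below, the caller-side view of this machinery may help. A hedged sketch using the public Select API defined above (a compilable fragment added for this review; db setup omitted):

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// selectByFilePath lists addresses of objects in cnr whose FilePath
// attribute equals path, staying on the indexed fast path.
func selectByFilePath(db *meta.DB, cnr cid.ID, path string) ([]oid.Address, error) {
	fs := objectSDK.SearchFilters{}
	fs.AddFilter(objectSDK.AttributeFilePath, path, objectSDK.MatchStringEqual)

	var prm meta.SelectPrm
	prm.SetContainerID(cnr)
	prm.SetFilters(fs)
	prm.SetUseAttributeIndex(true) // FilePath is FKBT-indexed (see put.go)

	res, err := db.Select(context.Background(), prm)
	if err != nil {
		return nil, err
	}
	return res.AddressList(), nil
}
```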
-func (db *DB) selectFastFilter( - tx *bbolt.Tx, - cnr cid.ID, // container we search on - f objectSDK.SearchFilter, // fast filter - to map[string]int, // resulting cache - fNum int, // index of filter -) { - currEpoch := db.epochState.CurrentEpoch() - bucketName := make([]byte, bucketKeySize) - switch f.Header() { - case v2object.FilterHeaderObjectID: - db.selectObjectID(tx, f, cnr, to, fNum, currEpoch) - case v2object.FilterHeaderObjectType: - for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) { - selectAllFromBucket(tx, bucketName, to, fNum) - } - case v2object.FilterHeaderParent: - bucketName := parentBucketName(cnr, bucketName) - db.selectFromList(tx, bucketName, f, to, fNum) - case v2object.FilterHeaderSplitID: - bucketName := splitBucketName(cnr, bucketName) - db.selectFromList(tx, bucketName, f, to, fNum) - case v2object.FilterHeaderECParent: - bucketName := ecInfoBucketName(cnr, bucketName) - db.selectFromList(tx, bucketName, f, to, fNum) - case v2object.FilterPropertyRoot: - selectAllFromBucket(tx, rootBucketName(cnr, bucketName), to, fNum) - case v2object.FilterPropertyPhy: - selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum) - selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum) - selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum) - default: // user attribute - bucketName := attributeBucketName(cnr, f.Header(), bucketName) - if f.Operation() == objectSDK.MatchNotPresent { - selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum) - } else { - db.selectFromFKBT(tx, bucketName, f, to, fNum) - } - } -} - -var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{ - v2object.TypeRegular.String(): {primaryBucketName, parentBucketName}, - v2object.TypeTombstone.String(): {tombstoneBucketName}, - v2object.TypeLock.String(): {bucketNameLockers}, -} - -func allBucketNames(cnr cid.ID) (names [][]byte) { - for _, fns := range mBucketNaming { - for _, fn := range fns { - names = append(names, fn(cnr, make([]byte, bucketKeySize))) - } - } - return -} - -func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) { - appendNames := func(key string) { - fns, ok := mBucketNaming[key] - if ok { - for _, fn := range fns { - names = append(names, fn(cnr, make([]byte, bucketKeySize))) - } - } - } - - switch mType { - default: - case objectSDK.MatchStringNotEqual: - for key := range mBucketNaming { - if key != typeVal { - appendNames(key) - } - } - case objectSDK.MatchStringEqual: - appendNames(typeVal) - case objectSDK.MatchCommonPrefix: - for key := range mBucketNaming { - if strings.HasPrefix(key, typeVal) { - appendNames(key) - } - } - } - - return -} - -func (db *DB) selectFromFKBT( - tx *bbolt.Tx, - name []byte, // fkbt root bucket name - f objectSDK.SearchFilter, // filter for operation and value - to map[string]int, // resulting cache - fNum int, // index of filter -) { // - matchFunc, ok := db.matchers[f.Operation()] - if !ok { - return - } - - fkbtRoot := tx.Bucket(name) - if fkbtRoot == nil { - return - } - - _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { - fkbtLeaf := fkbtRoot.Bucket(k) - if fkbtLeaf == nil { - return nil - } - - return fkbtLeaf.ForEach(func(k, _ []byte) error { - markAddressInCache(to, fNum, string(k)) - - return nil - }) - }) -} - -// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in -// resulting cache. 
-func selectOutsideFKBT( - tx *bbolt.Tx, - incl [][]byte, // buckets - name []byte, // fkbt root bucket name - to map[string]int, // resulting cache - fNum int, // index of filter -) { - mExcl := make(map[string]struct{}) - - bktExcl := tx.Bucket(name) - if bktExcl != nil { - _ = bktExcl.ForEachBucket(func(k []byte) error { - exclBktLeaf := bktExcl.Bucket(k) - return exclBktLeaf.ForEach(func(k, _ []byte) error { - mExcl[string(k)] = struct{}{} - - return nil - }) - }) - } - - for i := range incl { - bktIncl := tx.Bucket(incl[i]) - if bktIncl == nil { - continue - } - - _ = bktIncl.ForEach(func(k, _ []byte) error { - if _, ok := mExcl[string(k)]; !ok { - markAddressInCache(to, fNum, string(k)) - } - - return nil - }) - } -} - -// selectFromList looks into index to find list of addresses to add in -// resulting cache. -func (db *DB) selectFromList( - tx *bbolt.Tx, - name []byte, // list root bucket name - f objectSDK.SearchFilter, // filter for operation and value - to map[string]int, // resulting cache - fNum int, // index of filter -) { // - bkt := tx.Bucket(name) - if bkt == nil { - return - } - - var ( - lst [][]byte - err error - ) - - switch op := f.Operation(); op { - case objectSDK.MatchStringEqual: - lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) - if err != nil { - return - } - default: - fMatch, ok := db.matchers[op] - if !ok { - return - } - - if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error { - l, err := decodeList(val) - if err != nil { - return err - } - - lst = append(lst, l...) - - return nil - }); err != nil { - return - } - } - - for i := range lst { - markAddressInCache(to, fNum, string(lst[i])) - } -} - -// selectObjectID processes objectID filter with in-place optimizations. -func (db *DB) selectObjectID( - tx *bbolt.Tx, - f objectSDK.SearchFilter, - cnr cid.ID, - to map[string]int, // resulting cache - fNum int, // index of filter - currEpoch uint64, -) { - appendOID := func(id oid.ID) { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(id) - - var splitInfoError *objectSDK.SplitInfoError - ok, _, err := db.exists(tx, addr, oid.Address{}, currEpoch) - if (err == nil && ok) || errors.As(err, &splitInfoError) { - raw := make([]byte, objectKeySize) - id.Encode(raw) - markAddressInCache(to, fNum, string(raw)) - } - } - - switch op := f.Operation(); op { - case objectSDK.MatchStringEqual: - var id oid.ID - if err := id.DecodeString(f.Value()); err == nil { - appendOID(id) - } - default: - fMatch, ok := db.matchers[op] - if !ok { - return - } - - for _, bucketName := range bucketNamesForType(cnr, objectSDK.MatchStringNotEqual, "") { - // copy-paste from DB.selectAllFrom - bkt := tx.Bucket(bucketName) - if bkt == nil { - return - } - - _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error { - var id oid.ID - if err := id.Decode(k); err == nil { - appendOID(id) - } - return nil - }) - } - } -} - -// matchSlowFilters return true if object header is matched by all slow filters. 
-func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { - result := addr - if len(f) == 0 { - return result, true - } - - obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) - if err != nil { - return result, false - } - - for i := range f { - var data []byte - switch f[i].Header() { - case v2object.FilterHeaderVersion: - data = []byte(obj.Version().String()) - case v2object.FilterHeaderHomomorphicHash: - if isECChunk { - return result, false // EC chunk and EC parent hashes are incomparable - } - cs, _ := obj.PayloadHomomorphicHash() - data = cs.Value() - case v2object.FilterHeaderCreationEpoch: - data = make([]byte, 8) - binary.LittleEndian.PutUint64(data, obj.CreationEpoch()) - case v2object.FilterHeaderPayloadLength: - if isECChunk { - return result, false // EC chunk and EC parent payload lengths are incomparable - } - data = make([]byte, 8) - binary.LittleEndian.PutUint64(data, obj.PayloadSize()) - case v2object.FilterHeaderOwnerID: - data = []byte(obj.OwnerID().EncodeToString()) - case v2object.FilterHeaderPayloadHash: - if isECChunk { - return result, false // EC chunk and EC parent payload hashes are incomparable - } - cs, _ := obj.PayloadChecksum() - data = cs.Value() - default: // user attribute - v, ok := attributeValue(obj, f[i].Header()) - if ok { - if ech := obj.ECHeader(); ech != nil { - result.SetObject(ech.Parent()) - } - data = []byte(v) - } else { - return result, f[i].Operation() == objectSDK.MatchNotPresent - } - } - - matchFunc, ok := db.matchers[f[i].Operation()] - if !ok { - return result, false - } - - if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) { - return result, false - } - } - - return result, true -} - -func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { - buf := make([]byte, addressKeySize) - obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) - if err != nil { - var ecInfoError *objectSDK.ECInfoError - if errors.As(err, &ecInfoError) { - for _, chunk := range ecInfoError.ECInfo().Chunks { - var objID oid.ID - if err = objID.ReadFromV2(chunk.ID); err != nil { - continue - } - addr.SetObject(objID) - obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) - if err == nil { - return obj, true, nil - } - } - } - return nil, false, err - } - return obj, false, nil -} - -func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) { - objectAttributes := obj.Attributes() - if ech := obj.ECHeader(); ech != nil { - objectAttributes = ech.ParentAttributes() - } - for _, attr := range objectAttributes { - if attr.Key() == attribute { - return attr.Value(), true - } - } - return "", false -} - -// groupFilters divides filters in two groups: fast and slow. Fast filters -// processed by indexes and slow filters processed after by unmarshaling -// object headers. 
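To make the fast/slow split concrete before the implementation: a sketch (fragment; the classification follows the case list in the function below, assuming useAttributeIndex is true) of where a mixed query lands:

```go
fs := objectSDK.SearchFilters{}
fs.AddRootFilter() // $Object:ROOT property -> fast filter (well-known bucket scan)
fs.AddFilter(objectSDK.AttributeFilePath, "/a", objectSDK.MatchCommonPrefix)
// FilePath -> fast, but only because it is an indexed attribute (and only
// when useAttributeIndex is true); otherwise it would fall to the slow group.
fs.AddFilter("Author", "anna", objectSDK.MatchStringEqual)
// "Author" is not indexed -> slow filter: candidate headers get unmarshaled.
```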
-func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) { - res := filterGroup{ - fastFilters: make(objectSDK.SearchFilters, 0, len(filters)), - slowFilters: make(objectSDK.SearchFilters, 0, len(filters)), - } - - for i := range filters { - switch filters[i].Header() { - case v2object.FilterHeaderContainerID: // support deprecated field - err := res.cnr.DecodeString(filters[i].Value()) - if err != nil { - return filterGroup{}, fmt.Errorf("parse container id: %w", err) - } - - res.withCnrFilter = true - case // fast filters - v2object.FilterHeaderObjectID, - v2object.FilterHeaderObjectType, - v2object.FilterHeaderParent, - v2object.FilterHeaderSplitID, - v2object.FilterHeaderECParent, - v2object.FilterPropertyRoot, - v2object.FilterPropertyPhy: - res.fastFilters = append(res.fastFilters, filters[i]) - default: - if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) { - res.fastFilters = append(res.fastFilters, filters[i]) - } else { - res.slowFilters = append(res.slowFilters, filters[i]) - } - } - } - - return res, nil -} - -func markAddressInCache(cache map[string]int, fNum int, addr string) { - if num := cache[addr]; num == fNum { - cache[addr] = num + 1 - } -} - -// Returns true if no object can satisfy fs, which happens when a filter -// requires a reserved system header to be absent. -func checkNonEmpty(fs objectSDK.SearchFilters) bool { - for i := range fs { - if fs[i].Operation() == objectSDK.MatchNotPresent && isSystemKey(fs[i].Header()) { - return true - } - } - - return false -} - -// returns true if string key is a reserved system filter key. -func isSystemKey(key string) bool { - return strings.HasPrefix(key, v2object.ReservedFilterPrefix) -} diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go deleted file mode 100644 index ce2156d2e..000000000 --- a/pkg/local_object_storage/metabase/select_test.go +++ /dev/null @@ -1,1244 +0,0 @@ -package meta_test - -import ( - "context" - "encoding/hex" - "math/rand" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestDB_SelectUserAttributes(t *testing.T) { - t.Parallel() - t.Run("with_index", func(t *testing.T) { - testSelectUserAttributes(t, true) - }) - t.Run("without_index", func(t *testing.T) { - testSelectUserAttributes(t, false) - }) -} - -func testSelectUserAttributes(t *testing.T, index bool) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - raw1 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw1, "foo", "bar") -
testutil.AddAttribute(raw1, "x", "y") - - var putPrm meta.PutPrm - putPrm.SetIndexAttributes(index) - putPrm.SetObject(raw1) - _, err := db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw2 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw2, "foo", "bar") - testutil.AddAttribute(raw2, "x", "z") - - putPrm.SetObject(raw2) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw3 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw3, "a", "b") - - putPrm.SetObject(raw3) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw4 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2") - - putPrm.SetObject(raw4) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw5 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3") - - putPrm.SetObject(raw5) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw6 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3") - - putPrm.SetObject(raw6) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - - raw7 := testutil.GenerateObjectWithCID(cnr) - var attr objectSDK.Attribute - attr.SetKey(objectSDK.AttributeFilePath) - attr.SetValue("/test/3/4") - attrs := raw7.Attributes() - attrs = append(attrs, attr) - ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{ - ID: oidtest.ID(), - Attributes: attrs, - }, 0, 3, []byte{}, 0) - raw7.SetECHeader(ech) - putPrm.SetObject(raw7) - _, err = db.Put(context.Background(), putPrm) - require.NoError(t, err) - var raw7Parent oid.Address - raw7Parent.SetContainer(cnr) - raw7Parent.SetObject(ech.Parent()) - - fs := objectSDK.SearchFilters{} - fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw1), - object.AddressOf(raw2), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("x", "y", objectSDK.MatchStringEqual) - testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual) - testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("a", "b", objectSDK.MatchStringEqual) - testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("c", "d", objectSDK.MatchStringEqual) - testSelect2(t, db, cnr, fs, index) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("foo", "", objectSDK.MatchNotPresent) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw3), - object.AddressOf(raw4), - object.AddressOf(raw5), - object.AddressOf(raw6), - object.AddressOf(raw7), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("a", "", objectSDK.MatchNotPresent) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw1), - object.AddressOf(raw2), - object.AddressOf(raw4), - object.AddressOf(raw5), - object.AddressOf(raw6), - object.AddressOf(raw7), - ) - - fs = objectSDK.SearchFilters{} - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw1), - object.AddressOf(raw2), - object.AddressOf(raw3), - object.AddressOf(raw4), - object.AddressOf(raw5), - object.AddressOf(raw6), - object.AddressOf(raw7), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter("key", "", objectSDK.MatchNotPresent) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw1), - 
object.AddressOf(raw2), - object.AddressOf(raw3), - object.AddressOf(raw4), - object.AddressOf(raw5), - object.AddressOf(raw6), - object.AddressOf(raw7), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw4), - object.AddressOf(raw5), - object.AddressOf(raw6), - raw7Parent, - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix) - testSelect2(t, db, cnr, fs, index, - object.AddressOf(raw4), - object.AddressOf(raw5), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual) - testSelect2(t, db, cnr, fs, index, - raw7Parent, - ) -} - -func TestDB_SelectRootPhyParent(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - // prepare - - small := testutil.GenerateObjectWithCID(cnr) - err := putBig(db, small) - require.NoError(t, err) - - ts := testutil.GenerateObjectWithCID(cnr) - ts.SetType(objectSDK.TypeTombstone) - err = putBig(db, ts) - require.NoError(t, err) - - leftChild := testutil.GenerateObjectWithCID(cnr) - leftChild.InitRelations() - err = putBig(db, leftChild) - require.NoError(t, err) - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - err = putBig(db, lock) - require.NoError(t, err) - - parent := testutil.GenerateObjectWithCID(cnr) - - rightChild := testutil.GenerateObjectWithCID(cnr) - rightChild.SetParent(parent) - idParent, _ := parent.ID() - rightChild.SetParentID(idParent) - err = putBig(db, rightChild) - require.NoError(t, err) - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(idParent) - idLeftChild, _ := leftChild.ID() - idRightChild, _ := rightChild.ID() - link.SetChildren(idLeftChild, idRightChild) - - err = putBig(db, link) - require.NoError(t, err) - - t.Run("root objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddRootFilter() - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(parent), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterPropertyRoot, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("phy objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddPhyFilter() - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(ts), - object.AddressOf(leftChild), - object.AddressOf(rightChild), - object.AddressOf(link), - object.AddressOf(lock), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterPropertyPhy, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("regular objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeRegular.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(leftChild), - object.AddressOf(rightChild), - object.AddressOf(link), - object.AddressOf(parent), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeRegular.String(), objectSDK.MatchStringNotEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(ts), - object.AddressOf(lock), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - 
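The pattern these subtests repeat (one type filter, one container, addresses back) can be expressed as a single helper. The sketch below is hypothetical — `selectByType` does not exist in the repository — but it uses only the Select API exercised elsewhere in this file.

```go
package meta_test

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// selectByType is a hypothetical helper mirroring what these subtests do:
// select all objects of one type within a container.
func selectByType(db *meta.DB, cnr cidSDK.ID, typ v2object.Type) ([]oid.Address, error) {
	fs := objectSDK.SearchFilters{}
	fs.AddFilter(v2object.FilterHeaderObjectType, typ.String(), objectSDK.MatchStringEqual)

	var prm meta.SelectPrm
	prm.SetContainerID(cnr)
	prm.SetFilters(fs)

	res, err := db.Select(context.Background(), prm)
	if err != nil {
		return nil, err
	}
	return res.AddressList(), nil
}
```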
t.Run("tombstone objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeTombstone.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(ts)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeTombstone.String(), objectSDK.MatchStringNotEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(leftChild), - object.AddressOf(rightChild), - object.AddressOf(link), - object.AddressOf(parent), - object.AddressOf(lock), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("objects with parent", func(t *testing.T) { - idParent, _ := parent.ID() - - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderParent, - idParent.EncodeToString(), - objectSDK.MatchStringEqual) - - testSelect(t, db, cnr, fs, - object.AddressOf(rightChild), - object.AddressOf(link), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderParent, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("all objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(ts), - object.AddressOf(leftChild), - object.AddressOf(rightChild), - object.AddressOf(link), - object.AddressOf(parent), - object.AddressOf(lock), - ) - }) -} - -func TestDB_SelectInhume(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - raw1 := testutil.GenerateObjectWithCID(cnr) - err := putBig(db, raw1) - require.NoError(t, err) - - raw2 := testutil.GenerateObjectWithCID(cnr) - err = putBig(db, raw2) - require.NoError(t, err) - - fs := objectSDK.SearchFilters{} - testSelect(t, db, cnr, fs, - object.AddressOf(raw1), - object.AddressOf(raw2), - ) - - err = metaInhume(db, object.AddressOf(raw2), oidtest.ID()) - require.NoError(t, err) - - fs = objectSDK.SearchFilters{} - testSelect(t, db, cnr, fs, - object.AddressOf(raw1), - ) -} - -func TestDB_SelectPayloadHash(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - raw1 := testutil.GenerateObjectWithCID(cnr) - err := putBig(db, raw1) - require.NoError(t, err) - - raw2 := testutil.GenerateObjectWithCID(cnr) - err = putBig(db, raw2) - require.NoError(t, err) - - cs, _ := raw1.PayloadChecksum() - payloadHash := hex.EncodeToString(cs.Value()) - - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash, - objectSDK.MatchStringEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash[:len(payloadHash)-1], - objectSDK.MatchCommonPrefix) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash, - objectSDK.MatchStringNotEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - "", - objectSDK.MatchNotPresent) - - testSelect(t, db, cnr, fs) - - t.Run("invalid hashes", func(t *testing.T) { - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash[:len(payloadHash)-1], - 
objectSDK.MatchStringNotEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1), object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash[:len(payloadHash)-2]+"x", - objectSDK.MatchCommonPrefix) - - testSelect(t, db, cnr, fs) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadHash, - payloadHash[:len(payloadHash)-3]+"x0", - objectSDK.MatchCommonPrefix) - - testSelect(t, db, cnr, fs) - }) -} - -func TestDB_SelectWithSlowFilters(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - v20 := new(version.Version) - v20.SetMajor(2) - - var v21 version.Version - v21.SetMajor(2) - v21.SetMinor(1) - - raw1 := testutil.GenerateObjectWithCID(cnr) - raw1.SetPayloadSize(10) - raw1.SetCreationEpoch(11) - raw1.SetVersion(v20) - err := putBig(db, raw1) - require.NoError(t, err) - - raw2 := testutil.GenerateObjectWithCID(cnr) - raw2.SetPayloadSize(20) - raw2.SetCreationEpoch(21) - raw2.SetVersion(&v21) - err = putBig(db, raw2) - require.NoError(t, err) - - t.Run("object with TZHash", func(t *testing.T) { - cs, _ := raw1.PayloadHomomorphicHash() - - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderHomomorphicHash, - hex.EncodeToString(cs.Value()), - objectSDK.MatchStringEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderHomomorphicHash, - hex.EncodeToString(cs.Value()), - objectSDK.MatchStringNotEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderHomomorphicHash, - "", - objectSDK.MatchNotPresent) - - testSelect(t, db, cnr, fs) - }) - - t.Run("object with payload length", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadLength, "20", objectSDK.MatchStringEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadLength, "20", objectSDK.MatchStringNotEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderPayloadLength, "", objectSDK.MatchNotPresent) - - testSelect(t, db, cnr, fs) - }) - - t.Run("object with creation epoch", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderCreationEpoch, "11", objectSDK.MatchStringEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderCreationEpoch, "11", objectSDK.MatchStringNotEqual) - - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderCreationEpoch, "", objectSDK.MatchNotPresent) - - testSelect(t, db, cnr, fs) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderCreationEpoch, "1", objectSDK.MatchCommonPrefix) - - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - }) - - t.Run("object with version", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectVersionFilter(objectSDK.MatchStringEqual, v21) - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectVersionFilter(objectSDK.MatchStringNotEqual, v21) - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) - - fs = objectSDK.SearchFilters{} - 
fs.AddObjectVersionFilter(objectSDK.MatchNotPresent, version.Version{}) - testSelect(t, db, cnr, fs) - }) -} - -func TestDB_SelectObjectID(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - // prepare - - parent := testutil.GenerateObjectWithCID(cnr) - - regular := testutil.GenerateObjectWithCID(cnr) - idParent, _ := parent.ID() - regular.SetParentID(idParent) - regular.SetParent(parent) - - err := putBig(db, regular) - require.NoError(t, err) - - ts := testutil.GenerateObjectWithCID(cnr) - ts.SetType(objectSDK.TypeTombstone) - err = putBig(db, ts) - require.NoError(t, err) - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - err = putBig(db, lock) - require.NoError(t, err) - - t.Run("not found objects", func(t *testing.T) { - raw := testutil.GenerateObjectWithCID(cnr) - - id, _ := raw.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - - testSelect(t, db, cnr, fs) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("regular objects", func(t *testing.T) { - id, _ := regular.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - testSelect(t, db, cnr, fs, object.AddressOf(regular)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - testSelect(t, db, cnr, fs, - object.AddressOf(parent), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("tombstone objects", func(t *testing.T) { - id, _ := ts.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - testSelect(t, db, cnr, fs, object.AddressOf(ts)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(lock), - ) - }) - - t.Run("parent objects", func(t *testing.T) { - id, _ := parent.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - testSelect(t, db, cnr, fs, object.AddressOf(parent)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("lock objects", func(t *testing.T) { - id, _ := lock.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - testSelect(t, db, cnr, fs, object.AddressOf(lock)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(ts), - ) - }) -} - -func TestDB_SelectOwnerID(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - // prepare - - parent := testutil.GenerateObjectWithCID(cnr) - - regular := testutil.GenerateObjectWithCID(cnr) - idParent, _ := parent.ID() - regular.SetParentID(idParent) - regular.SetParent(parent) - - err := putBig(db, regular) - require.NoError(t, err) - - ts := testutil.GenerateObjectWithCID(cnr) - ts.SetType(objectSDK.TypeTombstone) - err = putBig(db, ts) - 
require.NoError(t, err) - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - err = putBig(db, lock) - require.NoError(t, err) - - t.Run("not found objects", func(t *testing.T) { - raw := testutil.GenerateObjectWithCID(cnr) - - fs := objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, raw.OwnerID()) - - testSelect(t, db, cnr, fs) - - fs = objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, raw.OwnerID()) - - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("regular objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, regular.OwnerID()) - testSelect(t, db, cnr, fs, object.AddressOf(regular)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, regular.OwnerID()) - testSelect(t, db, cnr, fs, - object.AddressOf(parent), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("tombstone objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, ts.OwnerID()) - testSelect(t, db, cnr, fs, object.AddressOf(ts)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, ts.OwnerID()) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(lock), - ) - }) - - t.Run("parent objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, parent.OwnerID()) - testSelect(t, db, cnr, fs, object.AddressOf(parent)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, parent.OwnerID()) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(ts), - object.AddressOf(lock), - ) - }) - - t.Run("lock objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, lock.OwnerID()) - testSelect(t, db, cnr, fs, object.AddressOf(lock)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, lock.OwnerID()) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(ts), - ) - }) -} - -func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - ecChunk1 := oidtest.ID() - ecChunk2 := oidtest.ID() - ecParent := oidtest.ID() - var ecParentAddr oid.Address - ecParentAddr.SetContainer(cnr) - ecParentAddr.SetObject(ecParent) - var ecParentAttr []objectSDK.Attribute - var attr objectSDK.Attribute - attr.SetKey(objectSDK.AttributeFilePath) - attr.SetValue("/1/2/3") - ecParentAttr = append(ecParentAttr, attr) - - chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetID(ecChunk1) - chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) - chunkObj.SetPayloadSize(uint64(5)) - chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0)) - - chunkObj2 := testutil.GenerateObjectWithCID(cnr) - chunkObj2.SetID(ecChunk2) - chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) - chunkObj2.SetPayloadSize(uint64(10)) - chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, 
[]byte{}, 0)) - - // put object with EC - - var prm meta.PutPrm - prm.SetObject(chunkObj) - _, err := db.Put(context.Background(), prm) - require.NoError(t, err) - - prm.SetObject(chunkObj2) - _, err = db.Put(context.Background(), prm) - require.NoError(t, err) - - fs := objectSDK.SearchFilters{} - fs.AddRootFilter() - fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix) - testSelect(t, db, cnr, fs, ecParentAddr) -} - -type testTarget struct { - objects []*objectSDK.Object -} - -func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error { - tt.objects = append(tt.objects, obj) - return nil -} - -func cutObject(t *testing.T, p transformer.ChunkedObjectWriter, hdr *objectSDK.Object, size int) *transformer.AccessIdentifiers { - ctx := context.Background() - require.NoError(t, p.WriteHeader(ctx, hdr)) - - payload := make([]byte, size) - rand.New(rand.NewSource(0)).Read(payload) - - _, err := p.Write(ctx, payload) - require.NoError(t, err) - - ids, err := p.Close(ctx) - require.NoError(t, err) - return ids -} - -func TestDB_RawHead_SplitInfo(t *testing.T) { - t.Parallel() - - const ( - partSize = 10 - partCount = 2 - dataCount = 2 - parityCount = 1 - ) - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - tt := new(testTarget) - p := transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: &pk.PrivateKey, - NextTargetInit: func() transformer.ObjectWriter { return tt }, - NetworkState: epochState{e: 1}, - MaxSize: partSize, - }) - - hdr := objectSDK.New() - hdr.SetContainerID(cnr) - hdr.SetOwnerID(usertest.ID()) - ids := cutObject(t, p, hdr, partSize*partCount) - require.Equal(t, len(tt.objects), partCount+1) - - t.Run("rep", func(t *testing.T) { - testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], tt.objects[partCount-1]) - }) - t.Run("with ec", func(t *testing.T) { - ec, err := erasurecode.NewConstructor(dataCount, parityCount) - require.NoError(t, err) - - cs, err := ec.Split(tt.objects[partCount-1], &pk.PrivateKey) - require.NoError(t, err) - - testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], cs[0]) - }) -} - -func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIdentifiers, linking, lastPart *objectSDK.Object) { - expectedLinkID, ok := linking.ID() - require.True(t, ok) - - t.Run("first last, then linking", func(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - require.NoError(t, metaPut(db, lastPart, nil)) - require.NoError(t, metaPut(db, linking, nil)) - - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*ids.ParentID) - - _, err := metaGet(db, addr, true) - - var siErr *objectSDK.SplitInfoError - require.ErrorAs(t, err, &siErr) - - lastID, ok := siErr.SplitInfo().LastPart() - require.True(t, ok) - require.Equal(t, ids.SelfID, lastID) - - linkID, ok := siErr.SplitInfo().Link() - require.True(t, ok) - require.Equal(t, expectedLinkID, linkID) - }) - t.Run("first linking, then last", func(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - require.NoError(t, metaPut(db, linking, nil)) - require.NoError(t, metaPut(db, lastPart, nil)) - - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*ids.ParentID) - - _, err := metaGet(db, addr, true) - - var siErr *objectSDK.SplitInfoError - require.ErrorAs(t, err, &siErr) - - lastID, ok := 
siErr.SplitInfo().LastPart() - require.True(t, ok) - require.Equal(t, ids.SelfID, lastID) - - linkID, ok := siErr.SplitInfo().Link() - require.True(t, ok) - require.Equal(t, expectedLinkID, linkID) - }) - t.Run("only last part", func(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - require.NoError(t, metaPut(db, lastPart, nil)) - - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*ids.ParentID) - - _, err := metaGet(db, addr, true) - - var siErr *objectSDK.SplitInfoError - require.ErrorAs(t, err, &siErr) - - lastPart, ok := siErr.SplitInfo().LastPart() - require.True(t, ok) - require.Equal(t, ids.SelfID, lastPart) - }) -} - -func TestDB_SelectSplitID_EC(t *testing.T) { - t.Parallel() - - const ( - partSize = 10 - partCount = 2 - dataCount = 2 - parityCount = 1 - ) - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - tt := new(testTarget) - p := transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: &pk.PrivateKey, - NextTargetInit: func() transformer.ObjectWriter { return tt }, - NetworkState: epochState{e: 1}, - MaxSize: partSize, - }) - - hdr := objectSDK.New() - hdr.SetContainerID(cnr) - hdr.SetOwnerID(usertest.ID()) - cutObject(t, p, hdr, partSize*partCount) - require.Equal(t, len(tt.objects), partCount+1) - - split := tt.objects[0].SplitID() - require.NotNil(t, split) - - ec, err := erasurecode.NewConstructor(dataCount, parityCount) - require.NoError(t, err) - - for i := range partCount { - cs, err := ec.Split(tt.objects[i], &pk.PrivateKey) - require.NoError(t, err) - - require.NoError(t, putBig(db, cs[0])) - } - - t.Run("not present", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("split id", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, split.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(tt.objects[0]), - object.AddressOf(tt.objects[1]), - ) - }) - - t.Run("empty split", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) - }) - - t.Run("unknown split id", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, - objectSDK.NewSplitID().String(), - objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) - }) -} - -func TestDB_SelectSplitID(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - child1 := testutil.GenerateObjectWithCID(cnr) - child2 := testutil.GenerateObjectWithCID(cnr) - child3 := testutil.GenerateObjectWithCID(cnr) - - split1 := objectSDK.NewSplitID() - split2 := objectSDK.NewSplitID() - - child1.SetSplitID(split1) - child2.SetSplitID(split1) - child3.SetSplitID(split2) - - require.NoError(t, putBig(db, child1)) - require.NoError(t, putBig(db, child2)) - require.NoError(t, putBig(db, child3)) - - t.Run("not present", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("split id", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, 
split1.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(child1), - object.AddressOf(child2), - ) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, split2.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(child3)) - }) - - t.Run("empty split", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) - }) - - t.Run("unknown split id", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderSplitID, - objectSDK.NewSplitID().String(), - objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) - }) -} - -func TestDB_SelectContainerID(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - - obj1 := testutil.GenerateObjectWithCID(cnr) - err := putBig(db, obj1) - require.NoError(t, err) - - obj2 := testutil.GenerateObjectWithCID(cnr) - err = putBig(db, obj2) - require.NoError(t, err) - - t.Run("same cid", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddObjectContainerIDFilter(objectSDK.MatchStringEqual, cnr) - - testSelect(t, db, cnr, fs, - object.AddressOf(obj1), - object.AddressOf(obj2), - ) - - fs = objectSDK.SearchFilters{} - fs.AddObjectContainerIDFilter(objectSDK.MatchStringNotEqual, cnr) - - testSelect(t, db, cnr, fs, - object.AddressOf(obj1), - object.AddressOf(obj2), - ) - - fs = objectSDK.SearchFilters{} - fs.AddObjectContainerIDFilter(objectSDK.MatchNotPresent, cnr) - - testSelect(t, db, cnr, fs) - }) - - t.Run("not same cid", func(t *testing.T) { - newCnr := cidtest.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectContainerIDFilter(objectSDK.MatchStringEqual, newCnr) - - testSelect(t, db, cnr, fs) - }) -} - -func BenchmarkSelect(b *testing.B) { - const objCount = 1000 - db := newDB(b) - defer func() { require.NoError(b, db.Close(context.Background())) }() - - cid := cidtest.ID() - - for i := range objCount { - var attr objectSDK.Attribute - attr.SetKey("myHeader") - attr.SetValue(strconv.Itoa(i)) - obj := testutil.GenerateObjectWithCID(cid) - obj.SetAttributes(attr) - require.NoError(b, metaPut(db, obj, nil)) - } - - b.Run("string equal", func(b *testing.B) { - fs := objectSDK.SearchFilters{} - fs.AddFilter("myHeader", strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) - benchmarkSelect(b, db, cid, fs, 1) - }) - b.Run("string not equal", func(b *testing.B) { - fs := objectSDK.SearchFilters{} - fs.AddFilter("myHeader", strconv.Itoa(objCount/2), objectSDK.MatchStringNotEqual) - benchmarkSelect(b, db, cid, fs, objCount-1) - }) - b.Run("common prefix", func(b *testing.B) { - prefix := "99" - n := 1 /* 99 */ + 10 /* 990..999 */ - - fs := objectSDK.SearchFilters{} - fs.AddFilter("myHeader", prefix, objectSDK.MatchCommonPrefix) - benchmarkSelect(b, db, cid, fs, n) - }) - b.Run("unknown", func(b *testing.B) { - fs := objectSDK.SearchFilters{} - fs.AddFilter("myHeader", strconv.Itoa(objCount/2), objectSDK.MatchUnknown) - benchmarkSelect(b, db, cid, fs, 0) - }) -} - -func TestExpiredObjects(t *testing.T) { - t.Parallel() - - db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { - cidExp, _ := exp.ContainerID() - cidNonExp, _ := nonExp.ContainerID() - - objs, err := metaSelect(db, cidExp, 
objectSDK.SearchFilters{}, false) - require.NoError(t, err) - require.Empty(t, objs) // expired object should not be returned - - objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false) - require.NoError(t, err) - require.NotEmpty(t, objs) - }) -} - -func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { - b.ReportAllocs() - - var prm meta.SelectPrm - prm.SetContainerID(cid) - prm.SetFilters(fs) - - for range b.N { - res, err := db.Select(context.Background(), prm) - if err != nil { - b.Fatal(err) - } - if len(res.AddressList()) != expected { - b.Fatalf("expected %d items, got %d", expected, len(res.AddressList())) - } - } -} - -func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) { - var prm meta.SelectPrm - prm.SetFilters(fs) - prm.SetContainerID(cnr) - prm.SetUseAttributeIndex(useAttributeIndex) - - res, err := db.Select(context.Background(), prm) - return res.AddressList(), err -} diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go deleted file mode 100644 index 72618b1a0..000000000 --- a/pkg/local_object_storage/metabase/shard_id.go +++ /dev/null @@ -1,97 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - metamode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "go.etcd.io/bbolt" -) - -var ( - shardInfoBucket = []byte{shardInfoPrefix} - shardIDKey = []byte("id") -) - -// GetShardID sets metabase operation mode -// and reads shard id from db. -// If id is missing, returns nil, nil. -// -// GetShardID does not report any metrics. -func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) { - db.modeMtx.Lock() - defer db.modeMtx.Unlock() - db.mode = mode - - if _, err := os.Stat(db.info.Path); errors.Is(err, os.ErrNotExist) { - return nil, nil - } - - if err := db.openDB(ctx, mode); err != nil { - return nil, fmt.Errorf("open metabase: %w", err) - } - - id, err := db.readShardID() - - if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) - } - - return id, metaerr.Wrap(err) -} - -// ReadShardID reads shard id from db. -// If id is missing, returns nil, nil. -func (db *DB) readShardID() ([]byte, error) { - var id []byte - err := db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(shardInfoBucket) - if b != nil { - id = bytes.Clone(b.Get(shardIDKey)) - } - return nil - }) - return id, metaerr.Wrap(err) -} - -// SetShardID sets metabase operation mode -// and writes shard id to db. -func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error { - db.modeMtx.Lock() - defer db.modeMtx.Unlock() - db.mode = mode - - if mode.ReadOnly() { - return ErrReadOnlyMode - } - - if err := db.openDB(ctx, mode); err != nil { - return fmt.Errorf("open metabase: %w", err) - } - - err := db.writeShardID(id) - if err == nil { - db.metrics.SetMode(metamode.ConvertToComponentModeDegraded(mode)) - } - - if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) - } - - return metaerr.Wrap(err) -} - -// writeShardID writes shard id to db. 
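Stripped of the mode handling, the two methods above reduce to a single bucket/key round-trip. A minimal standalone sketch of the same pattern in bare bbolt, with hypothetical file, bucket, and key names:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("shard-meta.db", 0o640, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bucket, key := []byte("shardInfo"), []byte("id")

	// Write: create the info bucket if needed, then store the ID.
	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		return b.Put(key, []byte("shard-0001"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read: a missing bucket or key leaves id nil, mirroring the
	// "no id yet" result of readShardID.
	var id []byte
	_ = db.View(func(tx *bbolt.Tx) error {
		if b := tx.Bucket(bucket); b != nil {
			id = bytes.Clone(b.Get(key))
		}
		return nil
	})
	fmt.Printf("shard id: %q\n", id)
}
```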
-func (db *DB) writeShardID(id []byte) error {
- return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
- b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
- if err != nil {
- return err
- }
- return b.Put(shardIDKey, id)
- }))
-}
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
deleted file mode 100644
index 8f2376503..000000000
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package meta
-
-import (
- "bytes"
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// StorageIDPrm groups the parameters of StorageID operation.
-type StorageIDPrm struct {
- addr oid.Address
-}
-
-// StorageIDRes groups the resulting values of StorageID operation.
-type StorageIDRes struct {
- id []byte
-}
-
-// SetAddress is a StorageID option to set the object address to check.
-func (p *StorageIDPrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// StorageID returns storage ID.
-func (r StorageIDRes) StorageID() []byte {
- return r.id
-}
-
-// StorageID returns the storage descriptor for objects from the blobstor.
-// It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("StorageID", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- var res StorageIDRes
- if db.mode.NoMetabase() {
- return res, ErrDegradedMode
- }
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id = db.storageID(tx, prm.addr)
- return nil
- })
- success = err == nil
- return res, metaerr.Wrap(err)
-}
-
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
- key := make([]byte, bucketKeySize)
- smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
- if smallBucket == nil {
- return nil
- }
-
- storageID := smallBucket.Get(objectKey(addr.Object(), key))
- if storageID == nil {
- return nil
- }
-
- return bytes.Clone(storageID)
-}
-
-// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
-type UpdateStorageIDPrm struct {
- addr oid.Address
- id []byte
-}
-
-// UpdateStorageIDRes groups the resulting values of UpdateStorageID operation.
-type UpdateStorageIDRes struct{}
-
-// SetAddress is an UpdateStorageID option to set the object address to check.
-func (p *UpdateStorageIDPrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// SetStorageID is an UpdateStorageID option to set the storage ID.
-func (p *UpdateStorageIDPrm) SetStorageID(id []byte) {
- p.id = id
-}
-
-// UpdateStorageID updates the storage descriptor for objects from the blobstor.
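A caller-side sketch of how the two operations compose; `rewriteStorageID` is a hypothetical helper built only from the Prm/Res pairs declared above, and the implementation of UpdateStorageID follows.

```go
package example

import (
	"context"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// rewriteStorageID reads the current storage ID for addr, overwrites it
// with id, and returns the previous descriptor (nil if none was recorded).
func rewriteStorageID(ctx context.Context, db *meta.DB, addr oid.Address, id []byte) ([]byte, error) {
	var getPrm meta.StorageIDPrm
	getPrm.SetAddress(addr)

	res, err := db.StorageID(ctx, getPrm)
	if err != nil {
		return nil, err
	}

	var updPrm meta.UpdateStorageIDPrm
	updPrm.SetAddress(addr)
	updPrm.SetStorageID(id)

	if _, err := db.UpdateStorageID(ctx, updPrm); err != nil {
		return nil, err
	}

	return res.StorageID(), nil
}
```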
-func (db *DB) UpdateStorageID(ctx context.Context, prm UpdateStorageIDPrm) (res UpdateStorageIDRes, err error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("UpdateStorageID", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "metabase.UpdateStorageID", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.String("storage_id", string(prm.id)), - )) - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return res, ErrDegradedMode - } else if db.mode.ReadOnly() { - return res, ErrReadOnlyMode - } - - err = db.boltDB.Batch(func(tx *bbolt.Tx) error { - return setStorageID(tx, prm.addr, prm.id, true) - }) - success = err == nil - return res, metaerr.Wrap(err) -} diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go deleted file mode 100644 index fef680159..000000000 --- a/pkg/local_object_storage/metabase/storage_id_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package meta_test - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestDB_StorageID(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - raw1 := testutil.GenerateObject() - raw2 := testutil.GenerateObject() - deleted := testutil.GenerateObject() - - storageID := []byte{1, 2, 3, 4} - - // check StorageID from empty database - fetchedStorageID, err := metaStorageID(db, object.AddressOf(raw1)) - require.NoError(t, err) - require.Nil(t, fetchedStorageID) - - // put one object with storageID - err = metaPut(db, raw1, storageID) - require.NoError(t, err) - - // put one object without storageID - err = putBig(db, raw2) - require.NoError(t, err) - - // put object with storageID and delete it - err = metaPut(db, deleted, storageID) - require.NoError(t, err) - - cnrID, ok := deleted.ContainerID() - require.True(t, ok) - ts := testutil.GenerateObjectWithCID(cnrID) - require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object())) - - // check StorageID for object without storageID - fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2)) - require.NoError(t, err) - require.Nil(t, fetchedStorageID) - - // check StorageID for object with storageID - fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw1)) - require.NoError(t, err) - require.Equal(t, storageID, fetchedStorageID) - - // check StorageID for deleted object with storageID - fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted)) - require.NoError(t, err) - require.Equal(t, storageID, fetchedStorageID) - - t.Run("update", func(t *testing.T) { - storageID := []byte{1, 2, 3, 4, 5} - require.NoError(t, metaUpdateStorageID(db, object.AddressOf(raw2), storageID)) - require.NoError(t, metaUpdateStorageID(db, object.AddressOf(deleted), storageID)) - - fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2)) - require.NoError(t, err) - require.Equal(t, storageID, fetchedStorageID) - - fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted)) - require.NoError(t, err) - 
require.Equal(t, storageID, fetchedStorageID) - }) -} - -func TestPutWritecacheDataRace(t *testing.T) { - t.Parallel() - - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - putStorageID := []byte{1, 2, 3} - wcStorageID := []byte{1, 2, 3, 4, 5} - o := testutil.GenerateObject() - - fetchedStorageID, err := metaStorageID(db, object.AddressOf(o)) - require.NoError(t, err) - require.Nil(t, fetchedStorageID) - - // writecache flushes object and updates storageID before object actually saved to the metabase - metaUpdateStorageID(db, object.AddressOf(o), wcStorageID) - - // put object completes with writecache's storageID - err = metaPut(db, o, putStorageID) - require.NoError(t, err) - - fetchedStorageID, err = metaStorageID(db, object.AddressOf(o)) - require.NoError(t, err) - require.Equal(t, wcStorageID, fetchedStorageID) -} - -func metaUpdateStorageID(db *meta.DB, addr oid.Address, id []byte) error { - var sidPrm meta.UpdateStorageIDPrm - sidPrm.SetAddress(addr) - sidPrm.SetStorageID(id) - - _, err := db.UpdateStorageID(context.Background(), sidPrm) - return err -} - -func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) { - var sidPrm meta.StorageIDPrm - sidPrm.SetAddress(addr) - - r, err := db.StorageID(context.Background(), sidPrm) - return r.StorageID(), err -} diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go deleted file mode 100644 index 4948f3424..000000000 --- a/pkg/local_object_storage/metabase/upgrade.go +++ /dev/null @@ -1,602 +0,0 @@ -package meta - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "os" - "strconv" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "golang.org/x/sync/errgroup" -) - -const ( - upgradeLogFrequency = 50_000 - upgradeWorkersCount = 1_000 - compactMaxTxSize = 256 << 20 - upgradeTimeout = 1 * time.Second -) - -var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{ - 2: upgradeFromV2ToV3, - 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error { - log("metabase already upgraded") - return nil - }, -} - -func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error { - if _, err := os.Stat(path); err != nil { - return fmt.Errorf("check metabase existence: %w", err) - } - opts := bbolt.DefaultOptions - opts.Timeout = upgradeTimeout - db, err := bbolt.Open(path, os.ModePerm, opts) - if err != nil { - return fmt.Errorf("open metabase: %w", err) - } - var version uint64 - if err := db.View(func(tx *bbolt.Tx) error { - var e error - version, e = currentVersion(tx) - return e - }); err != nil { - return err - } - updater, found := updates[version] - if !found { - return fmt.Errorf("unsupported version %d: no update available", version) - } - if err := db.Update(func(tx *bbolt.Tx) error { - b := tx.Bucket(shardInfoBucket) - return b.Put(upgradeKey, zeroValue) - }); err != nil { - return fmt.Errorf("set upgrade key %w", err) - } - if err := updater(ctx, db, cs, log); err != nil { - return fmt.Errorf("update metabase schema: %w", err) - } - if err := db.Update(func(tx *bbolt.Tx) error { - b := 
tx.Bucket(shardInfoBucket) - return b.Delete(upgradeKey) - }); err != nil { - return fmt.Errorf("delete upgrade key %w", err) - } - if compact { - log("compacting metabase...") - err := compactDB(db) - if err != nil { - return fmt.Errorf("compact metabase: %w", err) - } - log("metabase compacted") - } - return db.Close() -} - -func compactDB(db *bbolt.DB) error { - sourcePath := db.Path() - tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339) - f, err := os.Stat(sourcePath) - if err != nil { - return err - } - dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{ - Timeout: 100 * time.Millisecond, - NoSync: true, - }) - if err != nil { - return fmt.Errorf("open new metabase to compact: %w", err) - } - if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil { - return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName))) - } - if err := dst.Sync(); err != nil { - return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName))) - } - if err := dst.Close(); err != nil { - return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName))) - } - if err := db.Close(); err != nil { - return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName))) - } - if err := os.Rename(tmpFileName, sourcePath); err != nil { - return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName))) - } - return nil -} - -func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { - if err := createExpirationEpochBuckets(ctx, db, log); err != nil { - return err - } - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - return dropUserAttributes(ctx, db, cs, log) - }) - eg.Go(func() error { - return dropOwnerIDIndex(ctx, db, log) - }) - eg.Go(func() error { - return dropPayloadChecksumIndex(ctx, db, log) - }) - if err := eg.Wait(); err != nil { - return err - } - return db.Update(func(tx *bbolt.Tx) error { - return updateVersion(tx, version) - }) -} - -type objectIDToExpEpoch struct { - containerID cid.ID - objectID oid.ID - expirationEpoch uint64 -} - -func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { - log("filling expiration epoch buckets...") - if err := db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName) - return err - }); err != nil { - return err - } - objects := make(chan objectIDToExpEpoch) - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - return selectObjectsWithExpirationEpoch(ctx, db, objects) - }) - var count atomic.Uint64 - for range upgradeWorkersCount { - eg.Go(func() error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - case obj, ok := <-objects: - if !ok { - return nil - } - if err := db.Batch(func(tx *bbolt.Tx) error { - if err := putUniqueIndexItem(tx, namedBucketItem{ - name: expEpochToObjectBucketName, - key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID), - val: zeroValue, - }); err != nil { - return err - } - val := make([]byte, epochSize) - binary.LittleEndian.PutUint64(val, obj.expirationEpoch) - return putUniqueIndexItem(tx, namedBucketItem{ - name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)), - key: objectKey(obj.objectID, make([]byte, objectKeySize)), - val: val, - }) - }); err != nil { - return err - } - } - if c := count.Add(1); c%upgradeLogFrequency == 0 { - 
log("expiration epoch filled for", c, "objects...") - } - } - }) - } - err := eg.Wait() - if err != nil { - log("expiration epoch buckets completed completed with error:", err) - return err - } - log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects") - return nil -} - -func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error { - defer close(objects) - - const batchSize = 1000 - it := &objectsWithExpirationEpochBatchIterator{ - lastAttributeKey: usrAttrPrefix, - } - for { - if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil { - return err - } - for _, item := range it.items { - select { - case <-ctx.Done(): - return ctx.Err() - case objects <- item: - } - } - - if len(it.items) < batchSize { - return nil - } - it.items = nil - } -} - -var ( - usrAttrPrefix = []byte{userAttributePrefix} - errBatchSizeLimit = errors.New("batch size limit") -) - -type objectsWithExpirationEpochBatchIterator struct { - lastAttributeKey []byte - lastAttributeValue []byte - lastAttrKeyValueItem []byte - items []objectIDToExpEpoch -} - -// - {prefix}{containerID}{attributeKey} <- bucket -// -- {attributeValue} <- bucket, expirationEpoch -// --- {objectID}: zeroValue <- record - -func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error { - seekAttrValue := it.lastAttributeValue - seekAttrKVItem := it.lastAttrKeyValueItem - err := db.View(func(tx *bbolt.Tx) error { - attrKeyC := tx.Cursor() - for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if len(attrKey) <= 1+cidSize { - continue - } - attributeKey := string(attrKey[1+cidSize:]) - if attributeKey != objectV2.SysAttributeExpEpoch { - continue - } - var containerID cid.ID - if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil { - return fmt.Errorf("decode container id from user attribute bucket: %w", err) - } - if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil { - return err - } - seekAttrValue = nil - seekAttrKVItem = nil - } - return nil - }) - if err != nil && !errors.Is(err, errBatchSizeLimit) { - return err - } - return nil -} - -func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error { - attrValueC := b.Cursor() - for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if v != nil { - continue // need to iterate over buckets, not records - } - expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64) - if err != nil { - return fmt.Errorf("parse expiration epoch: %w", err) - } - expirationEpochBucket := b.Bucket(attrValue) - attrKeyValueC := expirationEpochBucket.Cursor() - for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if v == nil { - continue // need to iterate over records, not buckets - } - if bytes.Equal(it.lastAttributeKey, attrKey) && 
bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) { - continue - } - var objectID oid.ID - if err := objectID.Decode(attrKeyValueItem); err != nil { - return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err) - } - it.lastAttributeKey = bytes.Clone(attrKey) - it.lastAttributeValue = bytes.Clone(attrValue) - it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem) - it.items = append(it.items, objectIDToExpEpoch{ - containerID: containerID, - objectID: objectID, - expirationEpoch: expirationEpoch, - }) - if len(it.items) == batchSize { - return errBatchSizeLimit - } - } - seekAttrKVItem = nil - } - return nil -} - -func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { - log("deleting user attribute buckets...") - const batch = 1000 - prefix := []byte{userAttributePrefix} - last := prefix - var count uint64 - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - var keys [][]byte - if err := db.View(func(tx *bbolt.Tx) error { - c := tx.Cursor() - for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() { - if bytes.Equal(last, k) { - continue - } - keys = append(keys, bytes.Clone(k)) - } - return nil - }); err != nil { - log("deleting user attribute buckets completed with an error:", err) - return err - } - if len(keys) == 0 { - log("deleting user attribute buckets completed successfully, deleted", count, "buckets") - return nil - } - last = keys[len(keys)-1] - cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys) - if err != nil { - log("deleting user attribute buckets completed with an error:", err) - return err - } - count += cnt - cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys) - if err != nil { - log("deleting user attribute buckets completed with an error:", err) - return err - } - count += cnt - log("deleted", count, "user attribute buckets") - } -} - -func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { - keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs) - if err != nil { - return 0, fmt.Errorf("select non indexed user attributes: %w", err) - } - if err := db.Batch(func(tx *bbolt.Tx) error { - for _, k := range keysToDrop { - if err := tx.DeleteBucket(k); err != nil { - return err - } - } - return nil - }); err != nil { - return 0, fmt.Errorf("drop non indexed user attributes: %w", err) - } - return uint64(len(keysToDrop)), nil -} - -func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) { - var keysToDrop [][]byte - for _, key := range keys { - attr, ok := attributeFromAttributeBucket(key) - if !ok { - return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) - } - if !IsAtrributeIndexed(attr) { - keysToDrop = append(keysToDrop, key) - continue - } - contID, ok := cidFromAttributeBucket(key) - if !ok { - return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) - } - info, err := cs.Info(ctx, contID) - if err != nil { - return nil, err - } - if info.Removed || !info.Indexed { - keysToDrop = append(keysToDrop, key) - } - } - return keysToDrop, nil -} - -func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) { - var dropBuckets [][]byte - 
for _, key := range keys { - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - - if err := dropEmptyNestedBuckets(ctx, db, key); err != nil { - return 0, err - } - - empty, exists, err := bucketIsEmpty(db, key) - if err != nil { - return 0, err - } - if empty && exists { - dropBuckets = append(dropBuckets, key) - } - } - if len(dropBuckets) == 0 { - return 0, nil - } - if err := db.Batch(func(tx *bbolt.Tx) error { - for _, key := range dropBuckets { - if err := tx.DeleteBucket(key); err != nil { - return err - } - } - return nil - }); err != nil { - return 0, fmt.Errorf("drop empty user attributes buckets: %w", err) - } - return uint64(len(dropBuckets)), nil -} - -func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) { - var empty bool - var exists bool - if err := db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(bucketKey) - if b == nil { - return nil - } - exists = true - empty = !hasAnyItem(b) - return nil - }); err != nil { - return false, false, fmt.Errorf("bucket empty check: %w", err) - } - return empty, exists, nil -} - -func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error { - var last []byte - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var dropBuckets [][]byte - var err error - dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last) - if err != nil { - return fmt.Errorf("select empty nested buckets: %w", err) - } - if len(dropBuckets) == 0 { - return nil - } - - if err := db.Batch(func(tx *bbolt.Tx) error { - rootBucket := tx.Bucket(rootBucketKey) - if rootBucket == nil { - return nil - } - for _, sb := range dropBuckets { - if err := rootBucket.DeleteBucket(sb); err != nil { - return err - } - } - return nil - }); err != nil { - return fmt.Errorf("drop empty nested buckets: %w", err) - } - } -} - -func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) { - const batchSize = 1000 - var result [][]byte - if err := db.View(func(tx *bbolt.Tx) error { - rootBucket := tx.Bucket(rootBucketKey) - if rootBucket == nil { - return nil - } - c := rootBucket.Cursor() - for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if bytes.Equal(last, k) { - continue - } - last = bytes.Clone(k) - if v != nil { // record - continue - } - nestedBucket := rootBucket.Bucket(k) - if nestedBucket == nil { - continue - } - if !hasAnyItem(nestedBucket) { - result = append(result, bytes.Clone(k)) - } - } - return nil - }); err != nil { - return nil, nil, err - } - return result, last, nil -} - -func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { - return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) { - log(append([]any{"owner ID index:"}, a...)...) - }) -} - -func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { - return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) { - log(append([]any{"payload checksum:"}, a...)...) 
- }) -} - -func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error { - log("deleting buckets...") - const batch = 1000 - var count uint64 - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - var keys [][]byte - if err := db.View(func(tx *bbolt.Tx) error { - c := tx.Cursor() - for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() { - keys = append(keys, bytes.Clone(k)) - } - return nil - }); err != nil { - log("deleting buckets completed with an error:", err) - return err - } - if len(keys) == 0 { - log("deleting buckets completed successfully, deleted", count, "buckets") - return nil - } - if err := db.Batch(func(tx *bbolt.Tx) error { - for _, k := range keys { - if err := tx.DeleteBucket(k); err != nil { - return err - } - } - return nil - }); err != nil { - log("deleting buckets completed with an error:", err) - return err - } - count += uint64(len(keys)) - log("deleted", count, "buckets") - } -} diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go deleted file mode 100644 index c90de4dd6..000000000 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ /dev/null @@ -1,222 +0,0 @@ -//go:build integration - -package meta - -import ( - "context" - "fmt" - "io" - "os" - "strconv" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -const upgradeFilePath = "/path/to/metabase.v2" - -func TestUpgradeV2ToV3(t *testing.T) { - path := createTempCopy(t, upgradeFilePath) - defer func() { - require.NoError(t, os.Remove(path)) - }() - db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t))) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion) - require.NoError(t, db.Close(context.Background())) - require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log)) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - require.NoError(t, db.Close(context.Background())) - fmt.Println() -} - -type testContainerInfoProvider struct{} - -func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) { - return container.Info{}, nil -} - -func createTempCopy(t *testing.T, path string) string { - src, err := os.Open(path) - require.NoError(t, err) - - tmpPath := upgradeFilePath + time.Now().Format(time.RFC3339) - dest, err := os.Create(tmpPath) - require.NoError(t, err) - - _, err = io.Copy(dest, src) - require.NoError(t, err) - - require.NoError(t, src.Close()) - require.NoError(t, dest.Close()) - - return tmpPath -} - -func 
TestGenerateMetabaseFile(t *testing.T) { - t.Skip("for generating db") - const ( - containersCount = 10_000 - simpleObjectsCount = 500_000 - complexObjectsCount = 500_000 // x2 - deletedByGCMarksCount = 100_000 - deletedByTombstoneCount = 100_000 // x2 - lockedCount = 100_000 // x2 - - allocSize = 128 << 20 - generateWorkersCount = 1_000 - minEpoch = 1_000 - maxFilename = 1_000 - maxStorageID = 10_000 - ) - - db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t))) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - db.boltDB.AllocSize = allocSize - db.boltDB.NoSync = true - require.NoError(t, db.Init(context.Background())) - containers := make([]cid.ID, containersCount) - for i := range containers { - containers[i] = cidtest.ID() - } - oc, err := db.ObjectCounters() - require.NoError(t, err) - require.True(t, oc.IsZero()) - eg, ctx := errgroup.WithContext(context.Background()) - eg.SetLimit(generateWorkersCount) - // simple objects - for i := range simpleObjectsCount { - i := i - eg.Go(func() error { - obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) - _, err := db.Put(ctx, PutPrm{ - obj: obj, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - require.NoError(t, err) - return nil - }) - } - require.NoError(t, eg.Wait()) - db.log.Info(ctx, "simple objects generated") - eg, ctx = errgroup.WithContext(context.Background()) - eg.SetLimit(generateWorkersCount) - // complex objects - for i := range complexObjectsCount { - i := i - eg.Go(func() error { - parent := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - child := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) - testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) - _, err := db.Put(ctx, PutPrm{ - obj: child, - }) - require.NoError(t, err) - return nil - }) - } - require.NoError(t, eg.Wait()) - db.log.Info(ctx, "complex objects generated") - eg, ctx = errgroup.WithContext(context.Background()) - eg.SetLimit(generateWorkersCount) - // simple objects deleted by gc marks - for i := range deletedByGCMarksCount { - i := i - eg.Go(func() error { - obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - _, err := db.Put(ctx, PutPrm{ - obj: obj, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - require.NoError(t, err) - _, err = db.Inhume(ctx, InhumePrm{ - target: []oid.Address{object.AddressOf(obj)}, - }) - require.NoError(t, err) - return nil - }) - } - require.NoError(t, eg.Wait()) - db.log.Info(ctx, "simple objects deleted by gc marks generated") - eg, ctx = 
errgroup.WithContext(context.Background()) - eg.SetLimit(10000) - // simple objects deleted by tombstones - for i := range deletedByTombstoneCount { - i := i - eg.Go(func() error { - obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - _, err := db.Put(ctx, PutPrm{ - obj: obj, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - tomb.SetType(objectSDK.TypeTombstone) - _, err = db.Put(ctx, PutPrm{ - obj: tomb, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - require.NoError(t, err) - tombAddr := object.AddressOf(tomb) - _, err = db.Inhume(ctx, InhumePrm{ - target: []oid.Address{object.AddressOf(obj)}, - tomb: &tombAddr, - }) - require.NoError(t, err) - return nil - }) - } - require.NoError(t, eg.Wait()) - db.log.Info(ctx, "simple objects deleted by tombstones generated") - eg, ctx = errgroup.WithContext(context.Background()) - eg.SetLimit(generateWorkersCount) - // simple objects locked by locks - for i := range lockedCount { - i := i - eg.Go(func() error { - obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) - _, err := db.Put(ctx, PutPrm{ - obj: obj, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - lock := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - lock.SetType(objectSDK.TypeLock) - testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) - _, err = db.Put(ctx, PutPrm{ - obj: lock, - id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), - }) - require.NoError(t, err) - err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()}) - require.NoError(t, err) - return nil - }) - } - require.NoError(t, eg.Wait()) - db.log.Info(ctx, "simple objects locked by locks generated") - require.NoError(t, db.boltDB.Sync()) - require.NoError(t, db.Close(context.Background())) -} diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go deleted file mode 100644 index 4ad83332b..000000000 --- a/pkg/local_object_storage/metabase/util.go +++ /dev/null @@ -1,310 +0,0 @@ -package meta - -import ( - "crypto/sha256" - "encoding/binary" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" -) - -var ( - // graveyardBucketName stores rows with the objects that have been - // covered with Tombstone objects. That objects should not be returned - // from the node and should not be accepted by the node from other - // nodes. - graveyardBucketName = []byte{graveyardPrefix} - // garbageBucketName stores rows with the objects that should be physically - // deleted by the node (Garbage Collector routine). 
- garbageBucketName = []byte{garbagePrefix} - toMoveItBucketName = []byte{toMoveItPrefix} - containerVolumeBucketName = []byte{containerVolumePrefix} - containerCounterBucketName = []byte{containerCountersPrefix} - expEpochToObjectBucketName = []byte{expirationEpochToObjectPrefix} - - zeroValue = []byte{0xFF} - - errInvalidLength = errors.New("invalid length") -) - -// Prefix bytes for database keys. All ids and addresses are encoded in binary -// unless specified otherwise. -// -//nolint:godot -const ( - // graveyardPrefix is used for the graveyard bucket. - // Key: object address - // Value: tombstone address - graveyardPrefix = iota - // garbagePrefix is used for the garbage bucket. - // Key: object address - // Value: dummy value - garbagePrefix - // toMoveItPrefix is used for bucket containing IDs of objects that are candidates for moving - // to another shard. - toMoveItPrefix - // containerVolumePrefix is used for storing container size estimations. - // Key: container ID - // Value: container size in bytes as little-endian uint64 - containerVolumePrefix - // lockedPrefix is used for storing locked objects information. - // Key: container ID - // Value: bucket mapping objects locked to the list of corresponding LOCK objects. - lockedPrefix - // shardInfoPrefix is used for storing shard ID. All keys are custom and are not connected to the container. - shardInfoPrefix - - // ====================== - // Unique index buckets. - // ====================== - - // primaryPrefix is used for prefixing buckets containing objects of REGULAR type. - // Key: object ID - // Value: marshalled object - primaryPrefix - // lockersPrefix is used for prefixing buckets containing objects of LOCK type. - // Key: object ID - // Value: marshalled object - lockersPrefix - // _ is unused. Previous usage was for prefixing buckets containing objects of STORAGEGROUP type. - // Key: object ID - // Value: marshaled object - _ - // tombstonePrefix is used for prefixing buckets containing objects of TOMBSTONE type. - // Key: object ID - // Value: marshaled object - tombstonePrefix - // smallPrefix is used for prefixing buckets mapping objects to the blobovniczas they are stored in. - // Key: object ID - // Value: blobovnicza ID - smallPrefix - // rootPrefix is used for prefixing buckets mapping parent object to the split info. - // Key: object ID - // Value: split info - rootPrefix - - // ==================== - // FKBT index buckets. - // ==================== - - // ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs. - // Key: owner ID - // Value: bucket containing object IDs as keys - // removed in version 3 - ownerPrefix - // userAttributePrefix was used for prefixing FKBT index buckets containing objects. - // Key: attribute value - // Value: bucket containing object IDs as keys - userAttributePrefix - - // ==================== - // List index buckets. - // ==================== - - // payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs. - // Key: payload hash - // Value: list of object IDs - // removed in version 3 - payloadHashPrefix - // parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs. - // Key: parent ID - // Value: list of object IDs - parentPrefix - // splitPrefix is used for prefixing List index buckets mapping split ID to a list of object IDs. 
- // Key: split ID - // Value: list of object IDs - splitPrefix - - // containerCountersPrefix is used for storing container object counters. - // Key: container ID + type - // Value: container size in bytes as little-endian uint64 - containerCountersPrefix - - // ecInfoPrefix is used for storing relation between EC parent id and chunk id. - // Key: container ID + type - // Value: Object id - ecInfoPrefix - - // expirationEpochToObjectPrefix is used for storing relation between expiration epoch and object id. - // Key: expiration epoch + object address - // Value: zero - expirationEpochToObjectPrefix - - // objectToExpirationEpochPrefix is used for storing relation between expiration epoch and object id. - // Key: object address - // Value: expiration epoch - objectToExpirationEpochPrefix -) - -const ( - cidSize = sha256.Size - bucketKeySize = 1 + cidSize - objectKeySize = sha256.Size - addressKeySize = cidSize + objectKeySize - epochSize = 8 -) - -func bucketName(cnr cid.ID, prefix byte, key []byte) []byte { - key[0] = prefix - cnr.Encode(key[1:]) - return key[:bucketKeySize] -} - -// primaryBucketName returns <CID>. -func primaryBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, primaryPrefix, key) -} - -// tombstoneBucketName returns <CID>_TS. -func tombstoneBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, tombstonePrefix, key) -} - -// smallBucketName returns <CID>_small. -func smallBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, smallPrefix, key) -} - -// attributeBucketName returns <CID>_<attributeKey>. -func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte { - key[0] = userAttributePrefix - cnr.Encode(key[1:]) - return append(key[:bucketKeySize], attributeKey...) -} - -func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) { - if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { - return cid.ID{}, false - } - var result cid.ID - return result, result.Decode(bucketName[1:bucketKeySize]) == nil -} - -func attributeFromAttributeBucket(bucketName []byte) (string, bool) { - if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { - return "", false - } - return string(bucketName[bucketKeySize:]), true -} - -// rootBucketName returns <CID>_root. -func rootBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, rootPrefix, key) -} - -// parentBucketName returns <CID>_parent. -func parentBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, parentPrefix, key) -} - -// splitBucketName returns <CID>_splitid. -func splitBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, splitPrefix, key) -} - -// ecInfoBucketName returns <CID>_ecinfo. -func ecInfoBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, ecInfoPrefix, key) -} - -// objectToExpirationEpochBucketName returns objectToExpirationEpochPrefix_<CID>.
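
The helpers above pack raw bytes into fixed-layout keys: a bucket name is one prefix byte followed by the 32-byte container ID, and the expiration index key (built by `expirationEpochKey` just below) puts a big-endian epoch before the container and object IDs, so bbolt's lexicographic key ordering doubles as ascending expiration ordering. A minimal self-contained sketch of both layouts; the `rawID` stand-in for the SDK's `cid.ID`/`oid.ID` types is an assumption for illustration only:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// rawID stands in for the SDK's cid.ID/oid.ID: the metabase layouts
// below only need the 32 raw bytes of an ID.
type rawID = [sha256.Size]byte

const (
	bucketKeySize = 1 + sha256.Size
	epochSize     = 8
)

// makeBucketName mirrors bucketName: one prefix byte plus the container
// ID, written into a caller-supplied buffer so hot paths can reuse it.
func makeBucketName(cnr rawID, prefix byte, key []byte) []byte {
	key[0] = prefix
	copy(key[1:], cnr[:])
	return key[:bucketKeySize]
}

// makeExpirationKey mirrors expirationEpochKey: the epoch comes first
// and is big-endian, so bbolt's lexicographic key order doubles as
// ascending expiration order.
func makeExpirationKey(epoch uint64, cnr, obj rawID) []byte {
	k := make([]byte, epochSize+2*sha256.Size)
	binary.BigEndian.PutUint64(k, epoch)
	copy(k[epochSize:], cnr[:])
	copy(k[epochSize+sha256.Size:], obj[:])
	return k
}

func main() {
	cnr := sha256.Sum256([]byte("container"))
	obj := sha256.Sum256([]byte("object"))

	key := make([]byte, bucketKeySize)
	fmt.Printf("bucket: %x...\n", makeBucketName(cnr, 6 /* primaryPrefix per the iota block above */, key)[:8])

	// Keys for epoch 2 sort after keys for epoch 1, whatever the IDs are.
	fmt.Println(bytes.Compare(makeExpirationKey(1, cnr, obj), makeExpirationKey(2, cnr, obj)) < 0) // true
}
```

Big-endian for the epoch is the load-bearing choice here: with little-endian, a cursor scan would not visit entries in epoch order.
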
-func objectToExpirationEpochBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, objectToExpirationEpochPrefix, key) -} - -func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte { - result := make([]byte, epochSize+addressKeySize) - binary.BigEndian.PutUint64(result, epoch) - cnr.Encode(result[epochSize:]) - obj.Encode(result[epochSize+cidSize:]) - return result -} - -func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) { - if len(key) != epochSize+addressKeySize { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("unexpected expiration epoch to object key length: %d", len(key)) - } - epoch := binary.BigEndian.Uint64(key) - var cnr cid.ID - if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err) - } - var obj oid.ID - if err := obj.Decode(key[epochSize+cidSize:]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err) - } - return epoch, cnr, obj, nil -} - -// addressKey returns key for K-V tables when key is a whole address. -func addressKey(addr oid.Address, key []byte) []byte { - addr.Container().Encode(key) - addr.Object().Encode(key[cidSize:]) - return key[:addressKeySize] -} - -// parses object address formed by addressKey. -func decodeAddressFromKey(dst *oid.Address, k []byte) error { - if len(k) != addressKeySize { - return errInvalidLength - } - - var cnr cid.ID - if err := cnr.Decode(k[:cidSize]); err != nil { - return err - } - - var obj oid.ID - if err := obj.Decode(k[cidSize:]); err != nil { - return err - } - - dst.SetObject(obj) - dst.SetContainer(cnr) - return nil -} - -// objectKey returns key for K-V tables when key is an object id. -func objectKey(obj oid.ID, key []byte) []byte { - obj.Encode(key) - return key[:objectKeySize] -} - -// if meets irregular object container in objs - returns its type, otherwise returns object.TypeRegular. -// -// firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. -func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") - - var keys [2][1 + cidSize]byte - - irregularTypeBuckets := [...]struct { - typ objectSDK.Type - name []byte - }{ - {objectSDK.TypeTombstone, tombstoneBucketName(idCnr, keys[0][:])}, - {objectSDK.TypeLock, bucketNameLockers(idCnr, keys[1][:])}, - } - - for i := range objs { - for j := range irregularTypeBuckets { - if inBucket(tx, irregularTypeBuckets[j].name, objs[i]) { - return irregularTypeBuckets[j].typ - } - } - } - - return objectSDK.TypeRegular -} - -// return true if provided object is of LOCK type. -func isLockObject(tx *bbolt.Tx, idCnr cid.ID, obj oid.ID) bool { - return inBucket(tx, - bucketNameLockers(idCnr, make([]byte, bucketKeySize)), - objectKey(obj, make([]byte, objectKeySize))) -} diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go deleted file mode 100644 index fbc0f1ad9..000000000 --- a/pkg/local_object_storage/metabase/version.go +++ /dev/null @@ -1,85 +0,0 @@ -package meta - -import ( - "encoding/binary" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "go.etcd.io/bbolt" -) - -// version contains current metabase version. 
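
Before the `const version = 3` record below is trusted, `checkVersion` (later in this file) combines three signals: the stored little-endian version, a leftover `upgrade` marker, and whether the database was just created. A hedged sketch of that decision table, with a plain map standing in for the shard-info bucket:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const currentVersion = 3

var (
	errOutdated   = errors.New("invalid version, resynchronization is required")
	errIncomplete = errors.New("metabase upgrade is not completed")
)

// checkVersionSketch mirrors checkVersion's structure: reject a version
// mismatch, reject a dangling upgrade marker, accept a fresh database,
// and treat an initialized-but-unversioned database as outdated.
func checkVersionSketch(info map[string][]byte, initialized bool) error {
	var knownVersion bool
	if data := info["version"]; len(data) == 8 {
		knownVersion = true
		if stored := binary.LittleEndian.Uint64(data); stored != currentVersion {
			return fmt.Errorf("%w: expected=%d, stored=%d", errOutdated, currentVersion, stored)
		}
	}
	// An "upgrade" marker left behind means a previous upgrade was interrupted.
	if len(info["upgrade"]) > 0 {
		return errIncomplete
	}
	if !initialized {
		return nil // fresh database: the caller writes the version next
	}
	if !knownVersion {
		return errOutdated // initialized but unversioned: requires resync
	}
	return nil
}

func main() {
	v := make([]byte, 8)
	binary.LittleEndian.PutUint64(v, currentVersion)
	fmt.Println(checkVersionSketch(map[string][]byte{"version": v}, true)) // <nil>
	fmt.Println(checkVersionSketch(map[string][]byte{}, true))            // outdated
}
```

The `upgrade` marker is presumably written while an upgrade runs and removed on success; the version test further down exercises exactly this by putting and then deleting the key around `Init`.
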
-const version = 3 - -var ( - versionKey = []byte("version") - upgradeKey = []byte("upgrade") -) - -// ErrOutdatedVersion is returned on initializing -// an existing metabase that is not compatible with -// the current code version. -var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required") - -var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed") - -var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket") - -func checkVersion(tx *bbolt.Tx, initialized bool) error { - var knownVersion bool - - b := tx.Bucket(shardInfoBucket) - if b != nil { - data := b.Get(versionKey) - if len(data) == 8 { - knownVersion = true - - stored := binary.LittleEndian.Uint64(data) - if stored != version { - return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored) - } - } - data = b.Get(upgradeKey) - if len(data) > 0 { - return ErrIncompletedUpgrade - } - } - - if !initialized { - // new database, write version - return updateVersion(tx, version) - } else if !knownVersion { - // db is initialized but no version - // has been found; that could happen - // if the db is corrupted or the version - // is <2 (is outdated and requires resync - // anyway) - return ErrOutdatedVersion - } - - return nil -} - -func updateVersion(tx *bbolt.Tx, version uint64) error { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, version) - - b, err := tx.CreateBucketIfNotExists(shardInfoBucket) - if err != nil { - return fmt.Errorf("create auxiliary bucket: %w", err) - } - return b.Put(versionKey, data) -} - -func currentVersion(tx *bbolt.Tx) (uint64, error) { - b := tx.Bucket(shardInfoBucket) - if b == nil { - return 0, errVersionUndefinedNoInfoBucket - } - data := b.Get(versionKey) - if len(data) != 8 { - return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data)) - } - return binary.LittleEndian.Uint64(data), nil -} diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go deleted file mode 100644 index b373fb32e..000000000 --- a/pkg/local_object_storage/metabase/version_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package meta - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -type epochStateImpl struct{} - -func (s epochStateImpl) CurrentEpoch() uint64 { - return 0 -} - -func TestVersion(t *testing.T) { - dir := t.TempDir() - - newDB := func(t *testing.T) *DB { - return New(WithPath(filepath.Join(dir, t.Name())), - WithPermissions(0o600), WithEpochState(epochStateImpl{})) - } - check := func(t *testing.T, db *DB) { - require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(shardInfoBucket) - if b == nil { - return errors.New("shard info bucket not found") - } - data := b.Get(versionKey) - if len(data) != 8 { - return errors.New("invalid version data") - } - if stored := binary.LittleEndian.Uint64(data); stored != version { - return fmt.Errorf("invalid version: %d != %d", stored, version) - } - return nil - })) - } - t.Run("simple", func(t *testing.T) { - db := newDB(t) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - check(t, db) - require.NoError(t, db.Close(context.Background())) - - t.Run("reopen", func(t *testing.T) { - require.NoError(t, 
db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - check(t, db) - require.NoError(t, db.Close(context.Background())) - }) - }) - t.Run("old data", func(t *testing.T) { - db := newDB(t) - require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite)) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - check(t, db) - require.NoError(t, db.Close(context.Background())) - }) - t.Run("invalid version", func(t *testing.T) { - db := newDB(t) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { - return updateVersion(tx, version+1) - })) - require.NoError(t, db.Close(context.Background())) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.Error(t, db.Init(context.Background())) - require.NoError(t, db.Close(context.Background())) - - t.Run("reset", func(t *testing.T) { - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Reset()) - check(t, db) - require.NoError(t, db.Close(context.Background())) - }) - }) - t.Run("incompleted upgrade", func(t *testing.T) { - db := newDB(t) - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init(context.Background())) - require.NoError(t, db.Close(context.Background())) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { - return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue) - })) - require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade) - require.NoError(t, db.Close(context.Background())) - - require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { - return tx.Bucket(shardInfoBucket).Delete(upgradeKey) - })) - require.NoError(t, db.Init(context.Background())) - require.NoError(t, db.Close(context.Background())) - }) -} diff --git a/pkg/local_object_storage/metrics/blobovnicza.go b/pkg/local_object_storage/metrics/blobovnicza.go deleted file mode 100644 index 460d6e638..000000000 --- a/pkg/local_object_storage/metrics/blobovnicza.go +++ /dev/null @@ -1,123 +0,0 @@ -package metrics - -import ( - "time" - - metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobobvnizcaMetrics) blobovniczatree.Metrics { - return &blobovniczaTreeMetrics{ - path: path, - shardID: undefined, - m: m, - } -} - -type blobovniczaTreeMetrics struct { - shardID string - path string - m metrics_impl.BlobobvnizcaMetrics -} - -func (m *blobovniczaTreeMetrics) Blobovnicza() blobovnicza.Metrics { - return &blobovniczaMetrics{ - shardID: func() string { return m.shardID }, - path: m.path, - m: m.m, - } -} - -func (m *blobovniczaTreeMetrics) SetParentID(parentID string) { - m.shardID = parentID -} - -func (m *blobovniczaTreeMetrics) SetMode(mod mode.ComponentMode) { - m.m.SetBlobobvnizcaTreeMode(m.shardID, m.path, mod) -} - -func (m *blobovniczaTreeMetrics) Close() { - m.m.CloseBlobobvnizcaTree(m.shardID, m.path) -} - -func (m 
*blobovniczaTreeMetrics) SetRebuildStatus(status string) { - m.m.BlobovniczaTreeRebuildStatus(m.shardID, m.path, status) -} - -func (m *blobovniczaTreeMetrics) SetRebuildPercent(value uint32) { - m.m.BlobovniczaTreeRebuildPercent(m.shardID, m.path, value) -} - -func (m *blobovniczaTreeMetrics) ObjectMoved(d time.Duration) { - m.m.BlobovniczaTreeObjectMoved(m.shardID, m.path, d) -} - -func (m *blobovniczaTreeMetrics) Delete(d time.Duration, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) -} - -func (m *blobovniczaTreeMetrics) Exists(d time.Duration, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) -} - -func (m *blobovniczaTreeMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) - if success { - m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size) - } -} - -func (m *blobovniczaTreeMetrics) Get(d time.Duration, size int, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) - if success { - m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size) - } -} - -func (m *blobovniczaTreeMetrics) Iterate(d time.Duration, success bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{}) -} - -func (m *blobovniczaTreeMetrics) Put(d time.Duration, size int, success bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{}) - if success { - m.m.AddBlobobvnizcaTreePut(m.shardID, m.path, size) - } -} - -func (m *blobovniczaTreeMetrics) ObjectsCount(d time.Duration, success bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "ObjectsCount", d, success, metrics_impl.NullBool{}) -} - -type blobovniczaMetrics struct { - m metrics_impl.BlobobvnizcaMetrics - shardID func() string - path string -} - -func (m *blobovniczaMetrics) AddOpenBlobovniczaSize(size uint64) { - m.m.AddOpenBlobovniczaSize(m.shardID(), m.path, size) -} - -func (m *blobovniczaMetrics) SubOpenBlobovniczaSize(size uint64) { - m.m.SubOpenBlobovniczaSize(m.shardID(), m.path, size) -} - -func (m *blobovniczaMetrics) IncOpenBlobovniczaCount() { - m.m.IncOpenBlobovniczaCount(m.shardID(), m.path) -} - -func (m *blobovniczaMetrics) DecOpenBlobovniczaCount() { - m.m.DecOpenBlobovniczaCount(m.shardID(), m.path) -} - -func (m *blobovniczaMetrics) AddOpenBlobovniczaItems(items uint64) { - m.m.AddOpenBlobovniczaItems(m.shardID(), m.path, items) -} - -func (m *blobovniczaMetrics) SubOpenBlobovniczaItems(items uint64) { - m.m.SubOpenBlobovniczaItems(m.shardID(), m.path, items) -} diff --git a/pkg/local_object_storage/metrics/blobstore.go b/pkg/local_object_storage/metrics/blobstore.go deleted file mode 100644 index 9a41f01d0..000000000 --- a/pkg/local_object_storage/metrics/blobstore.go +++ /dev/null @@ -1,69 +0,0 @@ -package metrics - -import ( - "time" - - metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" -) - -type blobstoreMetrics struct { - shardID string - m metrics_impl.BlobstoreMetrics -} - -func NewBlobstoreMetrics(m 
metrics_impl.BlobstoreMetrics) blobstor.Metrics { - return &blobstoreMetrics{ - shardID: undefined, - m: m, - } -} - -func (m *blobstoreMetrics) SetParentID(parentID string) { - m.shardID = parentID -} - -func (m *blobstoreMetrics) SetMode(readOnly bool) { - m.m.SetMode(m.shardID, readOnly) -} - -func (m *blobstoreMetrics) Close() { - m.m.Close(m.shardID) -} - -func (m *blobstoreMetrics) Delete(d time.Duration, success, withStorageID bool) { - m.m.MethodDuration(m.shardID, "Delete", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true}) -} - -func (m *blobstoreMetrics) Exists(d time.Duration, success, withStorageID bool) { - m.m.MethodDuration(m.shardID, "Exists", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true}) -} - -func (m *blobstoreMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) { - m.m.MethodDuration(m.shardID, "GetRange", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true}) - if success { - m.m.AddGet(m.shardID, size) - } -} - -func (m *blobstoreMetrics) Get(d time.Duration, size int, success, withStorageID bool) { - m.m.MethodDuration(m.shardID, "Get", d, success, metrics_impl.NullBool{Bool: withStorageID, Valid: true}) - if success { - m.m.AddGet(m.shardID, size) - } -} - -func (m *blobstoreMetrics) Iterate(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, "Iterate", d, success, metrics_impl.NullBool{}) -} - -func (m *blobstoreMetrics) Put(d time.Duration, size int, success bool) { - m.m.MethodDuration(m.shardID, "Put", d, success, metrics_impl.NullBool{}) - if success { - m.m.AddPut(m.shardID, size) - } -} - -func (m *blobstoreMetrics) ObjectsCount(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, "ObjectsCount", d, success, metrics_impl.NullBool{}) -} diff --git a/pkg/local_object_storage/metrics/consts.go b/pkg/local_object_storage/metrics/consts.go deleted file mode 100644 index 519930710..000000000 --- a/pkg/local_object_storage/metrics/consts.go +++ /dev/null @@ -1,3 +0,0 @@ -package metrics - -const undefined = "undefined" diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go deleted file mode 100644 index d93363fa3..000000000 --- a/pkg/local_object_storage/metrics/fstree.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "time" - - metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -func NewFSTreeMetricsWithoutShardID(path string, m metrics_impl.FSTreeMetrics) fstree.Metrics { - return &fstreeMetrics{ - shardID: undefined, - path: path, - m: m, - } -} - -type fstreeMetrics struct { - shardID string - path string - m metrics_impl.FSTreeMetrics -} - -func (m *fstreeMetrics) SetParentID(parentID string) { - m.shardID = parentID -} - -func (m *fstreeMetrics) SetMode(mod mode.ComponentMode) { - m.m.SetMode(m.shardID, m.path, mod) -} - -func (m *fstreeMetrics) Close() { - m.m.Close(m.shardID, m.path) -} - -func (m *fstreeMetrics) Iterate(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success) -} - -func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success) -} - -func (m *fstreeMetrics) Delete(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, "Delete", d, success) -} - -func (m 
*fstreeMetrics) Exists(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, "Exists", d, success) -} - -func (m *fstreeMetrics) Put(d time.Duration, size int, success bool) { - m.m.MethodDuration(m.shardID, m.path, "Put", d, success) - if success { - m.m.AddPut(m.shardID, m.path, size) - } -} - -func (m *fstreeMetrics) Get(d time.Duration, size int, success bool) { - m.m.MethodDuration(m.shardID, m.path, "Get", d, success) - if success { - m.m.AddGet(m.shardID, m.path, size) - } -} - -func (m *fstreeMetrics) GetRange(d time.Duration, size int, success bool) { - m.m.MethodDuration(m.shardID, m.path, "GetRange", d, success) - if success { - m.m.AddGet(m.shardID, m.path, size) - } -} - -func (m *fstreeMetrics) ObjectsCount(d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, "ObjectsCount", d, success) -} diff --git a/pkg/local_object_storage/metrics/metabase.go b/pkg/local_object_storage/metrics/metabase.go deleted file mode 100644 index e962e37cb..000000000 --- a/pkg/local_object_storage/metrics/metabase.go +++ /dev/null @@ -1,39 +0,0 @@ -package metrics - -import ( - "time" - - metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -func NewMetabaseMetrics(path string, m metrics_impl.MetabaseMetrics) meta.Metrics { - return &metabaseMetrics{ - shardID: undefined, - path: path, - m: m, - } -} - -type metabaseMetrics struct { - shardID string - path string - m metrics_impl.MetabaseMetrics -} - -func (m *metabaseMetrics) SetParentID(parentID string) { - m.shardID = parentID -} - -func (m *metabaseMetrics) SetMode(mode mode.ComponentMode) { - m.m.SetMode(m.shardID, m.path, mode.String()) -} - -func (m *metabaseMetrics) Close() { - m.m.Close(m.shardID, m.path) -} - -func (m *metabaseMetrics) AddMethodDuration(method string, d time.Duration, success bool) { - m.m.MethodDuration(m.shardID, m.path, method, d, success) -} diff --git a/pkg/local_object_storage/metrics/pilorama.go b/pkg/local_object_storage/metrics/pilorama.go deleted file mode 100644 index 050b769a0..000000000 --- a/pkg/local_object_storage/metrics/pilorama.go +++ /dev/null @@ -1,37 +0,0 @@ -package metrics - -import ( - "time" - - metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -func NewPiloramaMetrics(m metrics_impl.PiloramaMetrics) pilorama.Metrics { - return &piloramaMetrics{ - shardID: undefined, - m: m, - } -} - -type piloramaMetrics struct { - shardID string - m metrics_impl.PiloramaMetrics -} - -func (m *piloramaMetrics) SetParentID(id string) { - m.shardID = id -} - -func (m *piloramaMetrics) SetMode(mod mode.ComponentMode) { - m.m.SetMode(m.shardID, mod) -} - -func (m *piloramaMetrics) Close() { - m.m.Close(m.shardID) -} - -func (m *piloramaMetrics) AddMethodDuration(method string, d time.Duration, success bool) { - m.m.AddMethodDuration(m.shardID, method, d, success) -} diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go deleted file mode 100644 index 4c5238921..000000000 --- a/pkg/local_object_storage/pilorama/batch.go +++ /dev/null @@ -1,118 +0,0 @@ -package pilorama - -import ( - "cmp" - "encoding/binary" - "slices" - "sync" - "time" - - cidSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.etcd.io/bbolt" -) - -type batch struct { - forest *boltForest - timer *time.Timer - // mtx protects timer and operations fields. - // Because mtx can be taken inside a transaction, - // transactions MUST NOT be executed with the mutex taken to avoid a deadlock. - mtx sync.Mutex - start sync.Once - cid cidSDK.ID - treeID string - results []chan<- error - operations []*Move -} - -func (b *batch) trigger() { - b.mtx.Lock() - if b.timer != nil { - b.timer.Stop() - } - b.mtx.Unlock() - b.start.Do(b.run) -} - -func (b *batch) run() { - fullID := bucketName(b.cid, b.treeID) - err := b.forest.db.Update(func(tx *bbolt.Tx) error { - bLog, bTree, err := b.forest.getTreeBuckets(tx, fullID) - if err != nil { - return err - } - - b.mtx.Lock() - b.timer = nil - b.mtx.Unlock() - - // Sorting without a mutex is ok, because we append to this slice only if timer is non-nil. - // See (*boltForest).addBatch for details. - slices.SortFunc(b.operations, func(mi, mj *Move) int { - return cmp.Compare(mi.Time, mj.Time) - }) - b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time }) - - // Our main use-case is addition of new items. In this case, - // we do not need to perform undo()/redo(), just do(). - // https://github.com/trvedata/move-op/blob/6c23447c12a7862ff31b7fc2205f6c90fbdb9dc0/proof/Move_Create.thy#L259 - // - // For this optimization to work we need to ensure three things: - // 1. The node itself is not yet in tree. - // 2. The node is not a parent. This case is not mentioned in the article, because - // they consider a "static order" (perform all CREATE operations before MOVE). - // We need this because if node _is_ a parent, we could violate (3) for some late operation. - // See TestForest_ApplySameOperation for details. - // 3. Parent of each operation is already in tree. - var parents map[uint64]struct{} - var cKey [maxKeySize]byte - var slow bool - for i := range b.operations { - _, _, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], b.operations[i].Child)) - if inTree { - slow = true - break - } - - key := childrenKey(cKey[:], b.operations[i].Child, 0) - k, _ := bTree.Cursor().Seek(key) - if len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == b.operations[i].Child { - slow = true - break - } - - if b.operations[i].Parent == RootID { - continue - } else if parents == nil { - // Attaching key only to root is done frequently, - // no allocations are performed unless necessary. 
- parents = make(map[uint64]struct{}) - } else if _, ok := parents[b.operations[i].Parent]; ok { - continue - } - - p := b.operations[i].Parent - _, ts, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], p)) - if !inTree || b.operations[0].Time < ts { - slow = true - break - } - parents[b.operations[i].Parent] = struct{}{} - } - - if slow { - var lm Move - return b.forest.applyOperation(bLog, bTree, b.operations, &lm) - } - - for i := range b.operations { - if err := b.forest.do(bLog, bTree, cKey[:], b.operations[i]); err != nil { - return err - } - } - return nil - }) - for i := range b.results { - b.results[i] <- err - } -} diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go deleted file mode 100644 index 3156751f2..000000000 --- a/pkg/local_object_storage/pilorama/bench_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package pilorama - -import ( - "context" - "os" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func getTimestamp(reorder int, ts Timestamp) Timestamp { - base := ts / Timestamp(reorder) - rem := ts % Timestamp(reorder) - return base*Timestamp(reorder) + Timestamp(reorder) - rem -} - -func BenchmarkCreate(b *testing.B) { - // Use `os.TempDir` because we construct multiple times in the same test. - tmpDir, err := os.MkdirTemp(os.TempDir(), "*") - require.NoError(b, err) - - f := NewBoltForest( - WithPath(filepath.Join(tmpDir, "test.db")), - WithMaxBatchSize(runtime.GOMAXPROCS(0))) - require.NoError(b, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(b, f.Init(context.Background())) - defer func() { require.NoError(b, f.Close(context.Background())) }() - - b.Cleanup(func() { - require.NoError(b, os.RemoveAll(tmpDir)) - }) - - cid := cidtest.ID() - treeID := "tree" - ctx := context.Background() - var index atomic.Int32 - index.Store(-1) - b.SetParallelism(2) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - i := index.Add(1) - op := &Move{ - Meta: Meta{Time: getTimestamp(runtime.GOMAXPROCS(0)*2, Timestamp(i+1))}, - Child: Node(i + 1), - Parent: RootID, - } - if err := f.TreeApply(ctx, cid, treeID, op, true); err != nil { - b.FailNow() - } - } - }) -} diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go deleted file mode 100644 index 897b37ea0..000000000 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ /dev/null @@ -1,1681 +0,0 @@ -package pilorama - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "math/rand" - "os" - "path/filepath" - "slices" - "strconv" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/io" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type boltForest struct { - db *bbolt.DB - - modeMtx sync.RWMutex - mode mode.Mode - - // mtx protects batches field. 
- mtx sync.Mutex - batches []*batch - - cfg -} - -const ( - childrenKeySize = 17 - maxKeySize = childrenKeySize -) - -var ( - dataBucket = []byte{0} - logBucket = []byte{1} -) - -// ErrDegradedMode is returned when pilorama is in a degraded mode. -var ErrDegradedMode = logicerr.New("pilorama is in a degraded mode") - -// ErrReadOnlyMode is returned when pilorama is in a read-only mode. -var ErrReadOnlyMode = logicerr.New("pilorama is in a read-only mode") - -// NewBoltForest returns storage wrapper for storing operations on CRDT trees. -// -// Each tree is stored in a separate bucket by `CID + treeID` key. -// All integers are stored in little-endian unless explicitly specified otherwise. -// -// DB schema (for a single tree): -// timestamp is 8-byte, id is 4-byte. -// -// log storage (logBucket): -// timestamp in big-endian -> log operation -// -// tree storage (dataBucket): -// - 't' + node (id) -> timestamp when the node first appeared, -// - 'p' + node (id) -> parent (id), -// - 'm' + node (id) -> serialized meta, -// - 'c' + parent (id) + child (id) -> 0/1, -// - 'i' + 0 + attrKey + 0 + attrValue + 0 + parent (id) + node (id) -> 0/1 (1 for automatically created nodes). -func NewBoltForest(opts ...Option) ForestStorage { - b := boltForest{ - cfg: cfg{ - perm: os.ModePerm, - maxBatchDelay: bbolt.DefaultMaxBatchDelay, - maxBatchSize: bbolt.DefaultMaxBatchSize, - openFile: os.OpenFile, - metrics: &noopMetrics{}, - }, - mode: mode.Disabled, - } - - for i := range opts { - opts[i](&b.cfg) - } - - return &b -} - -func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error { - t.modeMtx.Lock() - defer t.modeMtx.Unlock() - - if t.mode == m { - return nil - } - - err := t.Close(ctx) - if err == nil && !m.NoMetabase() { - if err = t.openBolt(m); err == nil { - err = t.Init(ctx) - } - } - if err != nil { - return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) - } - - t.mode = m - t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) - return nil -} - -func (t *boltForest) Open(_ context.Context, mode mode.Mode) error { - t.modeMtx.Lock() - defer t.modeMtx.Unlock() - t.mode = mode - if mode.NoMetabase() { - return nil - } - return t.openBolt(mode) -} - -func (t *boltForest) openBolt(m mode.Mode) error { - readOnly := m.ReadOnly() - err := util.MkdirAllX(filepath.Dir(t.path), t.perm) - if err != nil { - return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err)) - } - - opts := *bbolt.DefaultOptions - opts.ReadOnly = readOnly - opts.NoSync = t.noSync - opts.Timeout = 100 * time.Millisecond - opts.OpenFile = t.openFile - - t.db, err = bbolt.Open(t.path, t.perm, &opts) - if err != nil { - return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err)) - } - - t.db.MaxBatchSize = t.maxBatchSize - t.db.MaxBatchDelay = t.maxBatchDelay - t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m)) - return nil -} - -func (t *boltForest) Init(context.Context) error { - if t.mode.NoMetabase() || t.db.IsReadOnly() { - return nil - } - return t.db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(dataBucket) - if err != nil { - return err - } - _, err = tx.CreateBucketIfNotExists(logBucket) - return err - }) -} - -func (t *boltForest) Close(context.Context) error { - var err error - if t.db != nil { - err = t.db.Close() - } - if err == nil { - t.metrics.Close() - } - return err -} - -func (t *boltForest) SetParentID(id string) { - t.metrics.SetParentID(id) -} - -// TreeMove implements the Forest interface. 
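
`TreeMove` and the other readers below lean on one property of the log bucket: keys are 8-byte big-endian timestamps, so `Cursor().Last()` is the newest operation and, as `TreeHeight` shows, its key is the tree height. A small runnable check of that property against a throwaway bbolt file:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"

	"go.etcd.io/bbolt"
)

func main() {
	dir, _ := os.MkdirTemp("", "pilorama-*")
	defer os.RemoveAll(dir)

	db, err := bbolt.Open(filepath.Join(dir, "forest.db"), 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	logBucket := []byte{1}
	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(logBucket)
		if err != nil {
			return err
		}
		// Timestamps are stored big-endian, so lexicographic key order
		// in bbolt equals numeric order of operation times.
		for _, ts := range []uint64{5, 2, 9} {
			key := make([]byte, 8)
			binary.BigEndian.PutUint64(key, ts)
			if err := b.Put(key, []byte("op")); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		panic(err)
	}

	_ = db.View(func(tx *bbolt.Tx) error {
		k, _ := tx.Bucket(logBucket).Cursor().Last()
		fmt.Println("height:", binary.BigEndian.Uint64(k)) // 9
		return nil
	})
}
```
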
-func (t *boltForest) TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) { - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeMove", - trace.WithAttributes( - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if !d.checkValid() { - return nil, ErrInvalidCIDDescriptor - } - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } else if t.mode.ReadOnly() { - return nil, ErrReadOnlyMode - } - - lm := *m - fullID := bucketName(d.CID, treeID) - return &lm, metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error { - bLog, bTree, err := t.getTreeBuckets(tx, fullID) - if err != nil { - return err - } - - lm.Time = t.getLatestTimestamp(bLog, d.Position, d.Size) - if lm.Child == RootID { - lm.Child = t.findSpareID(bTree) - } - return t.do(bLog, bTree, make([]byte, maxKeySize), &lm) - })) -} - -func (t *boltForest) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return 0, ErrDegradedMode - } - - var height uint64 - var retErr error - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot != nil { - k, _ := treeRoot.Bucket(logBucket).Cursor().Last() - height = binary.BigEndian.Uint64(k) - } else { - retErr = ErrTreeNotFound - } - return nil - }) - if err == nil { - err = retErr - } - return height, metaerr.Wrap(err) -} - -// TreeExists implements the Forest interface. -func (t *boltForest) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeExists", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeExists", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return false, ErrDegradedMode - } - - var exists bool - - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - exists = treeRoot != nil - return nil - }) - success = err == nil - return exists, metaerr.Wrap(err) -} - -var syncHeightKey = []byte{'h'} - -// TreeUpdateLastSyncHeight implements the pilorama.Forest interface. 
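
`TreeMove` above fills `Move.Child` via `findSpareID` when the caller passes `RootID`; the implementation (further down in this file) simply draws random 63-bit IDs until one is unused. A sketch of the same idea, with a map standing in for the state lookups against the tree bucket:

```go
package main

import (
	"fmt"
	"math/rand"
)

// findSpareIDSketch mirrors findSpareID: draw random 63-bit IDs until
// one is not present in the tree state.
func findSpareIDSketch(used map[uint64]struct{}) uint64 {
	for {
		id := uint64(rand.Int63())
		if _, ok := used[id]; !ok {
			return id
		}
	}
}

func main() {
	used := map[uint64]struct{}{1: {}, 2: {}}
	fmt.Println(findSpareIDSketch(used))
}
```

With a 2^63 ID space and trees many orders of magnitude smaller, the expected number of draws is barely above one, so the unbounded loop is safe in practice.
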
-func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeUpdateLastSyncHeight", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeUpdateLastSyncHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - rawHeight := make([]byte, 8) - binary.LittleEndian.PutUint64(rawHeight, height) - - buck := bucketName(cid, treeID) - err := metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(buck) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - return b.Put(syncHeightKey, rawHeight) - })) - success = err == nil - return err -} - -// TreeLastSyncHeight implements the pilorama.Forest interface. -func (t *boltForest) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeLastSyncHeight", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeLastSyncHeight", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - var height uint64 - - buck := bucketName(cid, treeID) - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(buck) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - data := b.Get(syncHeightKey) - if len(data) == 8 { - height = binary.LittleEndian.Uint64(data) - } - return nil - }) - success = err == nil - return height, metaerr.Wrap(err) -} - -// TreeAddByPath implements the Forest interface. 
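
`TreeAddByPath`, whose implementation follows, reuses the longest already-existing prefix of the requested path and emits one `Move` per missing component (plus a final leaf move carrying the user meta, omitted here). An in-memory sketch of that prefix walk, with `parent -> attribute value -> child` maps standing in for the tree buckets:

```go
package main

import "fmt"

// tree is a minimal in-memory stand-in: parent node -> path component -> child node.
type tree struct {
	next   uint64
	childs map[uint64]map[string]uint64
}

// addByPath mirrors the shape of addByPathInternal: reuse the longest
// existing prefix, then create one node per missing path component.
func (t *tree) addByPath(path []string) (created []uint64) {
	node := uint64(0) // RootID
	i := 0
	for ; i < len(path); i++ { // longest existing prefix
		ch, ok := t.childs[node][path[i]]
		if !ok {
			break
		}
		node = ch
	}
	for ; i < len(path); i++ { // create the rest
		t.next++
		if t.childs[node] == nil {
			t.childs[node] = map[string]uint64{}
		}
		t.childs[node][path[i]] = t.next
		created = append(created, t.next)
		node = t.next
	}
	return created
}

func main() {
	tr := &tree{childs: map[uint64]map[string]uint64{}}
	fmt.Println(tr.addByPath([]string{"a", "b", "c"})) // [1 2 3]
	fmt.Println(tr.addByPath([]string{"a", "b", "d"})) // [4]
}
```
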
-func (t *boltForest) TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeAddByPath", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeAddByPath", - trace.WithAttributes( - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Int("meta_count", len(meta)), - ), - ) - defer span.End() - - res, err := t.addByPathInternal(d, attr, treeID, path, meta) - success = err == nil - return res, err -} - -func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID string, path []string, meta []KeyValue) ([]Move, error) { - if !d.checkValid() { - return nil, ErrInvalidCIDDescriptor - } - if !isAttributeInternal(attr) { - return nil, ErrNotPathAttribute - } - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } else if t.mode.ReadOnly() { - return nil, ErrReadOnlyMode - } - - var lm []Move - var key [maxKeySize]byte - - fullID := bucketName(d.CID, treeID) - err := t.db.Batch(func(tx *bbolt.Tx) error { - bLog, bTree, err := t.getTreeBuckets(tx, fullID) - if err != nil { - return err - } - - i, node := t.getPathPrefix(bTree, attr, path) - - ts := t.getLatestTimestamp(bLog, d.Position, d.Size) - lm = make([]Move, len(path)-i+1) - for j := i; j < len(path); j++ { - lm[j-i] = Move{ - Parent: node, - Meta: Meta{ - Time: ts, - Items: []KeyValue{{Key: attr, Value: []byte(path[j])}}, - }, - Child: t.findSpareID(bTree), - } - - err := t.do(bLog, bTree, key[:], &lm[j-i]) - if err != nil { - return err - } - - ts = nextTimestamp(ts, uint64(d.Position), uint64(d.Size)) - node = lm[j-i].Child - } - - lm[len(lm)-1] = Move{ - Parent: node, - Meta: Meta{ - Time: ts, - Items: meta, - }, - Child: t.findSpareID(bTree), - } - return t.do(bLog, bTree, key[:], &lm[len(lm)-1]) - }) - return lm, metaerr.Wrap(err) -} - -// getLatestTimestamp returns timestamp for a new operation which is guaranteed to be bigger than -// all timestamps corresponding to already stored operations. -func (t *boltForest) getLatestTimestamp(bLog *bbolt.Bucket, pos, size int) uint64 { - var ts uint64 - - c := bLog.Cursor() - key, _ := c.Last() - if len(key) != 0 { - ts = binary.BigEndian.Uint64(key) - } - return nextTimestamp(ts, uint64(pos), uint64(size)) -} - -// findSpareID returns random unused ID. -func (t *boltForest) findSpareID(bTree *bbolt.Bucket) uint64 { - id := uint64(rand.Int63()) - key := make([]byte, 9) - - for { - _, _, _, ok := t.getState(bTree, stateKey(key, id)) - if !ok { - return id - } - id = uint64(rand.Int63()) - } -} - -// TreeApply implements the Forest interface. 
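
`getLatestTimestamp` above bumps the last stored timestamp with `nextTimestamp(ts, pos, size)`, which is defined outside the files in this diff. A plausible scheme, assumed here for illustration, gives the node at position `pos` of `size` the smallest timestamp greater than `ts` that is congruent to `pos` modulo `size`, so concurrent nodes can never produce colliding timestamps:

```go
package main

import "fmt"

// nextTimestampSketch returns the smallest t > ts with t % size == pos,
// giving each of the `size` nodes a disjoint stream of timestamps.
// This is an assumed scheme: nextTimestamp itself is not part of the
// deleted files shown in this diff.
func nextTimestampSketch(ts, pos, size uint64) uint64 {
	t := ts/size*size + pos
	if t <= ts {
		t += size
	}
	return t
}

func main() {
	// Three nodes: node 1 only ever produces timestamps congruent to 1 mod 3.
	fmt.Println(nextTimestampSketch(0, 1, 3)) // 1
	fmt.Println(nextTimestampSketch(1, 1, 3)) // 4
	fmt.Println(nextTimestampSketch(5, 1, 3)) // 7
}
```

Uniqueness matters because the log bucket is keyed by timestamp alone, and `filterSeen` below treats an existing key as an already-applied operation.
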
-func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeApply", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApply", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.Bool("background", backgroundSync), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return ErrDegradedMode - } else if t.mode.ReadOnly() { - return ErrReadOnlyMode - } - - if backgroundSync { - var seen bool - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cnr, treeID)) - if treeRoot == nil { - success = true - return nil - } - - b := treeRoot.Bucket(logBucket) - - var logKey [8]byte - binary.BigEndian.PutUint64(logKey[:], m.Time) - seen = b.Get(logKey[:]) != nil - success = true - return nil - }) - if err != nil || seen { - success = err == nil - return metaerr.Wrap(err) - } - } - - if t.db.MaxBatchSize == 1 { - fullID := bucketName(cnr, treeID) - err := metaerr.Wrap(t.db.Update(func(tx *bbolt.Tx) error { - bLog, bTree, err := t.getTreeBuckets(tx, fullID) - if err != nil { - return err - } - - var lm Move - return t.applyOperation(bLog, bTree, []*Move{m}, &lm) - })) - success = err == nil - return err - } - - ch := make(chan error, 1) - t.addBatch(cnr, treeID, m, ch) - err := <-ch - success = err == nil - return metaerr.Wrap(err) -} - -func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - m, err := t.filterSeen(cnr, treeID, m) - if err != nil { - return err - } - if len(m) == 0 { - success = true - return nil - } - - ch := make(chan error) - b := &batch{ - forest: t, - cid: cnr, - treeID: treeID, - results: []chan<- error{ch}, - operations: m, - } - go func() { - b.run() - }() - err = <-ch - success = err == nil - return metaerr.Wrap(err) -} - -func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) { - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - ops := make([]*Move, 0, len(m)) - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cnr, treeID)) - if treeRoot == nil { - ops = m - return nil - } - b := treeRoot.Bucket(logBucket) - for _, op := range m { - var logKey [8]byte - binary.BigEndian.PutUint64(logKey[:], op.Time) - seen := b.Get(logKey[:]) != nil - if !seen { - ops = append(ops, op) - } - } - return nil - }) - if err != nil { - return nil, metaerr.Wrap(err) - } - return ops, nil -} - -// TreeApplyStream should be used with caution: this method locks other write transactions while `source` is not closed. 
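
`TreeApplyBatch` above is made idempotent by `filterSeen`: any operation whose timestamp already has a log entry is dropped before the batch is scheduled. The same check, reduced to a map over applied timestamps:

```go
package main

import "fmt"

type move struct{ Time uint64 }

// filterSeenSketch mirrors filterSeen: drop operations whose timestamp
// is already present in the applied log, so replaying a batch is a no-op.
func filterSeenSketch(applied map[uint64]bool, ops []*move) []*move {
	out := make([]*move, 0, len(ops))
	for _, op := range ops {
		if !applied[op.Time] {
			out = append(out, op)
		}
	}
	return out
}

func main() {
	applied := map[uint64]bool{2: true}
	for _, op := range filterSeenSketch(applied, []*move{{Time: 1}, {Time: 2}, {Time: 3}}) {
		fmt.Println(op.Time) // 1, then 3
	}
}
```
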
-func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeApplyStream", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyStream", - trace.WithAttributes( - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return ErrDegradedMode - } else if t.mode.ReadOnly() { - return ErrReadOnlyMode - } - - fullID := bucketName(cnr, treeID) - err := metaerr.Wrap(t.db.Update(func(tx *bbolt.Tx) error { - bLog, bTree, err := t.getTreeBuckets(tx, fullID) - if err != nil { - return err - } - for { - select { - case <-ctx.Done(): - return ctx.Err() - case m, ok := <-source: - if !ok { - return nil - } - var lm Move - if e := t.applyOperation(bLog, bTree, []*Move{m}, &lm); e != nil { - return e - } - } - } - })) - success = err == nil - return err -} - -func (t *boltForest) addBatch(cnr cidSDK.ID, treeID string, m *Move, ch chan error) { - t.mtx.Lock() - for i := 0; i < len(t.batches); i++ { - t.batches[i].mtx.Lock() - if t.batches[i].timer == nil { - t.batches[i].mtx.Unlock() - copy(t.batches[i:], t.batches[i+1:]) - t.batches = t.batches[:len(t.batches)-1] - i-- - continue - } - - found := t.batches[i].cid.Equals(cnr) && t.batches[i].treeID == treeID - if found { - t.batches[i].results = append(t.batches[i].results, ch) - t.batches[i].operations = append(t.batches[i].operations, m) - if len(t.batches[i].operations) == t.db.MaxBatchSize { - t.batches[i].timer.Stop() - t.batches[i].timer = nil - t.batches[i].mtx.Unlock() - b := t.batches[i] - t.mtx.Unlock() - b.trigger() - return - } - t.batches[i].mtx.Unlock() - t.mtx.Unlock() - return - } - t.batches[i].mtx.Unlock() - } - b := &batch{ - forest: t, - cid: cnr, - treeID: treeID, - results: []chan<- error{ch}, - operations: []*Move{m}, - } - b.mtx.Lock() - b.timer = time.AfterFunc(t.db.MaxBatchDelay, b.trigger) - b.mtx.Unlock() - t.batches = append(t.batches, b) - t.mtx.Unlock() -} - -func (t *boltForest) getTreeBuckets(tx *bbolt.Tx, treeRoot []byte) (*bbolt.Bucket, *bbolt.Bucket, error) { - child := tx.Bucket(treeRoot) - if child != nil { - return child.Bucket(logBucket), child.Bucket(dataBucket), nil - } - - child, err := tx.CreateBucket(treeRoot) - if err != nil { - return nil, nil, err - } - bLog, err := child.CreateBucket(logBucket) - if err != nil { - return nil, nil, err - } - bData, err := child.CreateBucket(dataBucket) - if err != nil { - return nil, nil, err - } - return bLog, bData, nil -} - -// applyOperations applies log operations. Assumes lm are sorted by timestamp. -func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*Move, lm *Move) error { - var tmp Move - var cKey [maxKeySize]byte - - c := logBucket.Cursor() - - key, value := c.Last() - - b := bytes.NewReader(nil) - r := io.NewBinReaderFromIO(b) - - // 1. Undo up until the desired timestamp is here. 
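- // The replay below works in three steps: every stored operation newer
- // than ms[0] is undone while walking the log cursor backwards; each new
- // operation is then inserted at its timestamp; finally the undone suffix
- // is re-applied (redo) on top. The resulting state is the same as a full
- // replay of the log in timestamp order.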
- for len(key) == 8 && ms[0].Time < binary.BigEndian.Uint64(key) { - b.Reset(value) - - tmp.Child = r.ReadU64LE() - tmp.Parent = r.ReadU64LE() - tmp.Time = r.ReadVarUint() - if r.Err != nil { - return r.Err - } - if err := t.undo(&tmp, treeBucket, cKey[:]); err != nil { - return err - } - key, value = c.Prev() - } - - for i := range ms { - // Loop invariant: key represents the next stored timestamp after ms[i].Time. - - // 2. Insert the operation. - *lm = *ms[i] - if err := t.do(logBucket, treeBucket, cKey[:], lm); err != nil { - return err - } - - // Cursor can be invalid, seek again. - binary.BigEndian.PutUint64(cKey[:], lm.Time) - _, _ = c.Seek(cKey[:8]) - key, value = c.Next() - - // 3. Re-apply all other operations. - for len(key) == 8 && (i == len(ms)-1 || binary.BigEndian.Uint64(key) < ms[i+1].Time) { - if err := t.logFromBytes(&tmp, value); err != nil { - return err - } - if err := t.redo(treeBucket, cKey[:], &tmp, value[16:]); err != nil { - return err - } - key, value = c.Next() - } - } - - return nil -} - -func (t *boltForest) do(lb *bbolt.Bucket, b *bbolt.Bucket, key []byte, op *Move) error { - binary.BigEndian.PutUint64(key, op.Time) - rawLog := t.logToBytes(op) - if err := lb.Put(key[:8], rawLog); err != nil { - return err - } - - return t.redo(b, key, op, rawLog[16:]) -} - -func (t *boltForest) redo(b *bbolt.Bucket, key []byte, op *Move, rawMeta []byte) error { - var err error - - parent, ts, currMeta, inTree := t.getState(b, stateKey(key, op.Child)) - if inTree { - err = t.putState(b, oldKey(key, op.Time), parent, ts, currMeta) - } else { - ts = op.Time - err = b.Delete(oldKey(key, op.Time)) - } - - if err != nil || op.Child == op.Parent || t.isAncestor(b, op.Child, op.Parent) { - return err - } - - if inTree { - if err := b.Delete(childrenKey(key, op.Child, parent)); err != nil { - return err - } - - var meta Meta - if err := meta.FromBytes(currMeta); err != nil { - return err - } - for i := range meta.Items { - if isAttributeInternal(meta.Items[i].Key) { - key = internalKey(key, meta.Items[i].Key, string(meta.Items[i].Value), parent, op.Child) - err := b.Delete(key) - if err != nil { - return err - } - } - } - } - return t.addNode(b, key, op.Child, op.Parent, ts, op.Meta, rawMeta) -} - -// removeNode removes node keys from the tree except the children key or its parent. -func (t *boltForest) removeNode(b *bbolt.Bucket, key []byte, node, parent Node) error { - k := stateKey(key, node) - _, _, rawMeta, _ := t.getState(b, k) - - var meta Meta - if err := meta.FromBytes(rawMeta); err == nil { - for i := range meta.Items { - if isAttributeInternal(meta.Items[i].Key) { - err := b.Delete(internalKey(nil, meta.Items[i].Key, string(meta.Items[i].Value), parent, node)) - if err != nil { - return err - } - } - } - } - return b.Delete(k) -} - -// addNode adds node keys to the tree except the timestamp key. 
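-// For every node it writes the state key ('s' + child), the children key
-// ('c' + parent + child) and, for each internal attribute in meta, an index
-// key ('i' + attribute + value + parent + child); see the key helpers near
-// the end of this file for the exact layouts.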
-func (t *boltForest) addNode(b *bbolt.Bucket, key []byte, child, parent Node, time Timestamp, meta Meta, rawMeta []byte) error { - if err := t.putState(b, stateKey(key, child), parent, time, rawMeta); err != nil { - return err - } - - err := b.Put(childrenKey(key, child, parent), []byte{1}) - if err != nil { - return err - } - - for i := range meta.Items { - if !isAttributeInternal(meta.Items[i].Key) { - continue - } - - key = internalKey(key, meta.Items[i].Key, string(meta.Items[i].Value), parent, child) - if len(meta.Items) == 1 { - err = b.Put(key, []byte{1}) - } else { - err = b.Put(key, []byte{0}) - } - if err != nil { - return err - } - } - return nil -} - -func (t *boltForest) undo(m *Move, b *bbolt.Bucket, key []byte) error { - if err := b.Delete(childrenKey(key, m.Child, m.Parent)); err != nil { - return err - } - - parent, ts, rawMeta, ok := t.getState(b, oldKey(key, m.Time)) - if !ok { - return t.removeNode(b, key, m.Child, m.Parent) - } - - var meta Meta - if err := meta.FromBytes(rawMeta); err != nil { - return err - } - return t.addNode(b, key, m.Child, parent, ts, meta, rawMeta) -} - -func (t *boltForest) isAncestor(b *bbolt.Bucket, parent, child Node) bool { - key := make([]byte, 9) - key[0] = 's' - for node := child; node != parent; { - binary.LittleEndian.PutUint64(key[1:], node) - parent, _, _, ok := t.getState(b, key) - if !ok { - return false - } - node = parent - } - return true -} - -// TreeGetByPath implements the Forest interface. -func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeGetByPath", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetByPath", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Bool("latest", latest), - ), - ) - defer span.End() - - if !isAttributeInternal(attr) { - return nil, ErrNotPathAttribute - } - - if len(path) == 0 { - success = true - return nil, nil - } - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - var nodes []Node - - err := metaerr.Wrap(t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - - i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) - if i < len(path)-1 { - return nil - } - - var maxTimestamp uint64 - - c := b.Cursor() - - for i := range curNodes { - attrKey := internalKey(nil, attr, path[len(path)-1], curNodes[i], 0) - attrKey = attrKey[:len(attrKey)-8] - childKey, _ := c.Seek(attrKey) - for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) { - child := binary.LittleEndian.Uint64(childKey[len(childKey)-8:]) - if latest { - _, ts, _, _ := t.getState(b, stateKey(make([]byte, 9), child)) - if ts >= maxTimestamp { - nodes = append(nodes[:0], child) - maxTimestamp = ts - } - } else { - nodes = append(nodes, child) - } - childKey, _ = c.Next() - } - } - return nil - })) - success = err == nil - return nodes, err -} - -// TreeGetMeta implements the forest interface. 
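-// It returns the node's meta and parent ID as recorded under the state key.
-// A minimal lookup sketch (nodeID is assumed to be known):
-//
-//	meta, parent, err := forest.TreeGetMeta(ctx, cnr, "version", nodeID)
-//	if err == nil {
-//		name := meta.GetAttr(AttributeFilename) // nil when the attribute is absent
-//		_, _ = name, parent
-//	}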
-func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeGetMeta", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetMeta", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return Meta{}, 0, ErrDegradedMode - } - - key := stateKey(make([]byte, 9), nodeID) - - var m Meta - var parentID uint64 - - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - if data := b.Get(key); len(data) != 0 { - parentID = binary.LittleEndian.Uint64(data) - } - _, _, meta, _ := t.getState(b, stateKey(key, nodeID)) - return m.FromBytes(meta) - }) - success = err == nil - return m, parentID, metaerr.Wrap(err) -} - -func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshold int) bool { - key := make([]byte, 9) - key[0] = 'c' - - count := 0 - for _, nodeID := range nodeIDs { - binary.LittleEndian.PutUint64(key[1:], nodeID) - - c := b.Cursor() - for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() { - if count++; count > threshold { - return false - } - } - } - return true -} - -// TreeSortedByFilename implements the Forest interface. -func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeSortedByFilename", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeSortedByFilename", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, last, ErrDegradedMode - } - if len(nodeIDs) == 0 { - return nil, last, errors.New("empty node list") - } - - h := newHeap(last, count) - key := make([]byte, 9) - - var result []NodeInfo - var fewChildren bool - - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - - // If the node is a leaf, we could scan all filenames in the tree. - // To prevent this we first count the number of children: if it is less than - // the number of nodes we need to return, fallback to TreeGetChildren() implementation. 
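- // hasFewChildren stops counting as soon as the threshold is exceeded,
- // so this pre-check costs O(count) cursor steps, not O(children).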
- if fewChildren = t.hasFewChildren(b, nodeIDs, count); fewChildren { - var err error - result, err = t.getChildren(b, nodeIDs) - return err - } - - t.fillSortedChildren(b, nodeIDs, h) - - for info, ok := h.pop(); ok; info, ok = h.pop() { - for _, id := range info.id { - childInfo, err := t.getChildInfo(b, key, id) - if err != nil { - return err - } - result = append(result, childInfo) - } - } - return nil - }) - - success = err == nil - if err != nil { - return nil, last, metaerr.Wrap(err) - } - - if fewChildren { - result = sortAndCut(result, last) - } - res := mergeNodeInfos(result) - if len(res) > count { - res = res[:count] - } - if len(res) != 0 { - s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = NewCursor(s, res[len(res)-1].LastChild()) - } - return res, last, metaerr.Wrap(err) -} - -func sortByFilename(nodes []NodeInfo) { - slices.SortFunc(nodes, func(a, b NodeInfo) int { - return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename)) - }) -} - -func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { - var lastBytes []byte - if last != nil { - lastBytes = []byte(last.GetFilename()) - } - sortByFilename(result) - - for i := range result { - if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 { - return result[i:] - } - } - return nil -} - -func (t *boltForest) getChildInfo(b *bbolt.Bucket, key []byte, childID Node) (NodeInfo, error) { - childInfo := NodeInfo{ID: childID} - parentID, _, metaBytes, found := t.getState(b, stateKey(key, childID)) - if found { - childInfo.ParentID = parentID - if err := childInfo.Meta.FromBytes(metaBytes); err != nil { - return NodeInfo{}, err - } - } - return childInfo, nil -} - -func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *fixedHeap) { - c := b.Cursor() - prefix := internalKeyPrefix(nil, AttributeFilename) - - length := uint16(0) - count := 0 - - var nodes []uint64 - var lastFilename *string - for k, _ := c.Seek(prefix); len(k) > 0 && k[0] == 'i'; k, _ = c.Next() { - if len(k) < len(prefix)+2+16 { - continue - } - - parentID := binary.LittleEndian.Uint64(k[len(k)-16:]) - if !slices.Contains(nodeIDs, parentID) { - continue - } - - actualLength := binary.LittleEndian.Uint16(k[len(prefix):]) - childID := binary.LittleEndian.Uint64(k[len(k)-8:]) - filename := string(k[len(prefix)+2 : len(k)-16]) - - if lastFilename == nil { - lastFilename = &filename - nodes = append(nodes, childID) - } else if *lastFilename == filename { - nodes = append(nodes, childID) - } else { - processed := h.push(nodes, *lastFilename) - nodes = MultiNode{childID} - lastFilename = &filename - if actualLength != length { - length = actualLength - count = 1 - } else if processed { - if count++; count > h.count { - lastFilename = nil - nodes = nil - length = actualLength + 1 - count = 0 - c.Seek(binary.LittleEndian.AppendUint16(prefix, length)) - c.Prev() // c.Next() will be performed by for loop - } - } - } - } - - if len(nodes) != 0 && lastFilename != nil { - h.push(nodes, *lastFilename) - } -} - -// TreeGetChildren implements the Forest interface. 
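-// It returns the immediate children of nodeID together with their parent and
-// meta. A minimal usage sketch:
-//
-//	infos, err := forest.TreeGetChildren(ctx, cnr, "version", RootID)
-//	if err == nil {
-//		for _, info := range infos {
-//			_ = info.ID // info.ParentID == RootID here
-//		}
-//	}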
-func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeGetChildren", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetChildren", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - var result []NodeInfo - - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot == nil { - return ErrTreeNotFound - } - - b := treeRoot.Bucket(dataBucket) - - var err error - result, err = t.getChildren(b, []Node{nodeID}) - return err - }) - success = err == nil - return result, metaerr.Wrap(err) -} - -func (t *boltForest) getChildren(b *bbolt.Bucket, nodeIDs MultiNode) ([]NodeInfo, error) { - var result []NodeInfo - - key := make([]byte, 9) - for _, nodeID := range nodeIDs { - key[0] = 'c' - binary.LittleEndian.PutUint64(key[1:], nodeID) - - c := b.Cursor() - for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() { - childID := binary.LittleEndian.Uint64(k[9:]) - childInfo, err := t.getChildInfo(b, key, childID) - if err != nil { - return nil, err - } - result = append(result, childInfo) - } - } - return result, nil -} - -// TreeList implements the Forest interface. -func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeList", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeList", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - var ids []string - cidRaw := make([]byte, 32) - cid.Encode(cidRaw) - - cidLen := len(cidRaw) - - err := t.db.View(func(tx *bbolt.Tx) error { - c := tx.Cursor() - for k, _ := c.Seek(cidRaw); k != nil; k, _ = c.Next() { - if !bytes.HasPrefix(k, cidRaw) { - return nil - } - - ids = append(ids, string(k[cidLen:])) - } - - return nil - }) - if err != nil { - return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err)) - } - success = true - return ids, nil -} - -// TreeGetOpLog implements the pilorama.Forest interface. 
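-// It returns the first logged operation with timestamp >= height, or a zero
-// Move (with nil error) when the log has no such entry. A log-draining
-// sketch, assuming no operation is stored at timestamp 0:
-//
-//	var h uint64
-//	for {
-//		m, err := forest.TreeGetOpLog(ctx, cnr, "version", h)
-//		if err != nil || m.Time == 0 {
-//			break
-//		}
-//		h = m.Time + 1
-//	}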
-func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeGetOpLog", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetOpLog", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return Move{}, ErrDegradedMode - } - - key := make([]byte, 8) - binary.BigEndian.PutUint64(key, height) - - var lm Move - - err := t.db.View(func(tx *bbolt.Tx) error { - treeRoot := tx.Bucket(bucketName(cid, treeID)) - if treeRoot == nil { - return ErrTreeNotFound - } - - c := treeRoot.Bucket(logBucket).Cursor() - if _, data := c.Seek(key); data != nil { - return t.moveFromBytes(&lm, data) - } - return nil - }) - success = err == nil - return lm, metaerr.Wrap(err) -} - -// TreeDrop implements the pilorama.Forest interface. -func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeDrop", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeDrop", - trace.WithAttributes( - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return ErrDegradedMode - } else if t.mode.ReadOnly() { - return ErrReadOnlyMode - } - - err := metaerr.Wrap(t.db.Batch(func(tx *bbolt.Tx) error { - if treeID == "" { - c := tx.Cursor() - prefix := make([]byte, 32) - cid.Encode(prefix) - for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Seek(prefix) { - err := tx.DeleteBucket(k) - if err != nil { - return err - } - _, _ = c.First() // rewind the cursor to the root page - } - return nil - } - err := tx.DeleteBucket(bucketName(cid, treeID)) - if errors.Is(err, bbolt.ErrBucketNotFound) { - return ErrTreeNotFound - } - return err - })) - success = err == nil - return err -} - -// TreeListTrees implements ForestStorage. 
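-// Results are paginated: at most prm.BatchSize items are returned per call
-// (treeListTreesBatchSizeDefault when the field is unset) and NextPageToken
-// carries the last returned bucket name. A paging sketch:
-//
-//	var prm TreeListTreesPrm
-//	for {
-//		res, err := forest.TreeListTrees(ctx, prm)
-//		if err != nil {
-//			break // handle the error in real code
-//		}
-//		// ... consume res.Items ...
-//		if len(res.NextPageToken) == 0 {
-//			break // no more pages
-//		}
-//		prm.NextPageToken = res.NextPageToken
-//	}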
-func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - t.metrics.AddMethodDuration("TreeListTrees", time.Since(startedAt), success) - }() - - _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeListTrees") - defer span.End() - - t.modeMtx.RLock() - defer t.modeMtx.RUnlock() - - if t.mode.NoMetabase() { - return nil, ErrDegradedMode - } - - batchSize := prm.BatchSize - if batchSize <= 0 { - batchSize = treeListTreesBatchSizeDefault - } - var res TreeListTreesResult - err := metaerr.Wrap(t.db.View(func(tx *bbolt.Tx) error { - c := tx.Cursor() - checkNextPageToken := true - for k, _ := c.Seek(prm.NextPageToken); k != nil; k, _ = c.Next() { - if bytes.Equal(k, dataBucket) || bytes.Equal(k, logBucket) { - continue - } - - if checkNextPageToken && bytes.Equal(k, prm.NextPageToken) { - checkNextPageToken = false - continue - } - - var contID cidSDK.ID - if err := contID.Decode(k[:32]); err != nil { - return fmt.Errorf("decode container ID: %w", err) - } - res.Items = append(res.Items, ContainerIDTreeID{ - CID: contID, - TreeID: string(k[32:]), - }) - - if len(res.Items) == batchSize { - res.NextPageToken = bytes.Clone(k) - break - } - } - return nil - })) - success = err == nil - if err != nil { - return nil, err - } - return &res, nil -} - -func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) { - c := bTree.Cursor() - - var curNodes []Node - nextNodes := []Node{RootID} - var attrKey []byte - - for i := range path { - curNodes, nextNodes = nextNodes, curNodes[:0] - for j := range curNodes { - attrKey = internalKey(attrKey, attr, path[i], curNodes[j], 0) - attrKey = attrKey[:len(attrKey)-8] - - childKey, value := c.Seek(attrKey) - for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) { - if len(value) == 1 && value[0] == 1 { - nextNodes = append(nextNodes, binary.LittleEndian.Uint64(childKey[len(childKey)-8:])) - } - childKey, value = c.Next() - } - } - - if len(nextNodes) == 0 { - return i, curNodes - } - } - - return len(path), nextNodes -} - -func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) { - c := bTree.Cursor() - - var curNode Node - var attrKey []byte - -loop: - for i := range path { - attrKey = internalKey(attrKey, attr, path[i], curNode, 0) - attrKey = attrKey[:len(attrKey)-8] - - childKey, value := c.Seek(attrKey) - for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) { - if len(value) == 1 && value[0] == 1 { - curNode = binary.LittleEndian.Uint64(childKey[len(childKey)-8:]) - continue loop - } - childKey, value = c.Next() - } - - return i, curNode - } - - return len(path), curNode -} - -func (t *boltForest) moveFromBytes(m *Move, data []byte) error { - return t.logFromBytes(m, data) -} - -func (t *boltForest) logFromBytes(lm *Move, data []byte) error { - lm.Child = binary.LittleEndian.Uint64(data) - lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.FromBytes(data[16:]) -} - -func (t *boltForest) logToBytes(lm *Move) []byte { - w := io.NewBufBinWriter() - size := 8 + 8 + lm.Size() + 1 - // if lm.HasOld { - // size += 8 + lm.Old.Meta.Size() - // } - - w.Grow(size) - w.WriteU64LE(lm.Child) - w.WriteU64LE(lm.Parent) - lm.EncodeBinary(w.BinWriter) - // w.WriteBool(lm.HasOld) - // if lm.HasOld { - // w.WriteU64LE(lm.Old.Parent) - // lm.Old.Meta.EncodeBinary(w.BinWriter) - 
// } - return w.Bytes() -} - -func bucketName(cid cidSDK.ID, treeID string) []byte { - treeRoot := make([]byte, 32+len(treeID)) - cid.Encode(treeRoot) - copy(treeRoot[32:], treeID) - return treeRoot -} - -// 'o' + time -> old meta. -func oldKey(key []byte, ts Timestamp) []byte { - key[0] = 'o' - binary.LittleEndian.PutUint64(key[1:], ts) - return key[:9] -} - -// 's' + child ID -> parent + timestamp of the first appearance + meta. -func stateKey(key []byte, child Node) []byte { - key[0] = 's' - binary.LittleEndian.PutUint64(key[1:], child) - return key[:9] -} - -func (t *boltForest) putState(b *bbolt.Bucket, key []byte, parent Node, timestamp Timestamp, meta []byte) error { - data := make([]byte, len(meta)+8+8) - binary.LittleEndian.PutUint64(data, parent) - binary.LittleEndian.PutUint64(data[8:], timestamp) - copy(data[16:], meta) - return b.Put(key, data) -} - -func (t *boltForest) getState(b *bbolt.Bucket, key []byte) (Node, Timestamp, []byte, bool) { - data := b.Get(key) - if data == nil { - return 0, 0, nil, false - } - - parent := binary.LittleEndian.Uint64(data) - timestamp := binary.LittleEndian.Uint64(data[8:]) - return parent, timestamp, data[16:], true -} - -// 'c' + parent (id) + child (id) -> 0/1. -func childrenKey(key []byte, child, parent Node) []byte { - key[0] = 'c' - binary.LittleEndian.PutUint64(key[1:], parent) - binary.LittleEndian.PutUint64(key[9:], child) - return key[:childrenKeySize] -} - -func internalKeyPrefix(key []byte, k string) []byte { - key = key[:0] - key = append(key, 'i') - - l := len(k) - key = binary.LittleEndian.AppendUint16(key, uint16(l)) - key = append(key, k...) - return key -} - -// 'i' + attribute name (string) + attribute value (string) + parent (id) + node (id) -> 0/1. -func internalKey(key []byte, k, v string, parent, node Node) []byte { - size := 1 /* prefix */ + 2*2 /* len */ + 2*8 /* nodes */ + len(k) + len(v) - if cap(key) < size { - key = make([]byte, 0, size) - } - - key = internalKeyPrefix(key, k) - - l := len(v) - key = binary.LittleEndian.AppendUint16(key, uint16(l)) - key = append(key, v...) - - key = binary.LittleEndian.AppendUint64(key, parent) - key = binary.LittleEndian.AppendUint64(key, node) - return key -} diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go deleted file mode 100644 index ebfd0bcc0..000000000 --- a/pkg/local_object_storage/pilorama/forest.go +++ /dev/null @@ -1,400 +0,0 @@ -package pilorama - -import ( - "context" - "errors" - "fmt" - "slices" - "sort" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -var errInvalidKeyFormat = errors.New("invalid format: key must be cid and treeID") - -// memoryForest represents multiple replicating trees sharing a single storage. -type memoryForest struct { - // treeMap maps tree identifier (container ID + name) to the replicated log. - treeMap map[string]*memoryTree -} - -var _ Forest = (*memoryForest)(nil) - -// NewMemoryForest creates new empty forest. -// TODO: this function will eventually be removed and is here for debugging. -func NewMemoryForest() ForestStorage { - return &memoryForest{ - treeMap: make(map[string]*memoryTree), - } -} - -// TreeMove implements the Forest interface. 
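-// The operation's timestamp is taken from the tree's logical clock and a
-// Child equal to RootID is replaced with a fresh spare ID, so a new node
-// can be created by passing Child: RootID.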
-func (f *memoryForest) TreeMove(_ context.Context, d CIDDescriptor, treeID string, op *Move) (*Move, error) { - if !d.checkValid() { - return nil, ErrInvalidCIDDescriptor - } - - fullID := d.CID.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - s = newMemoryTree() - f.treeMap[fullID] = s - } - - op.Time = s.timestamp(d.Position, d.Size) - if op.Child == RootID { - op.Child = s.findSpareID() - } - - lm := s.do(op) - s.operations = append(s.operations, lm) - return &lm.Move, nil -} - -// TreeAddByPath implements the Forest interface. -func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) { - if !d.checkValid() { - return nil, ErrInvalidCIDDescriptor - } - if !isAttributeInternal(attr) { - return nil, ErrNotPathAttribute - } - - fullID := d.CID.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - s = newMemoryTree() - f.treeMap[fullID] = s - } - - i, node := s.getPathPrefix(attr, path) - lm := make([]Move, len(path)-i+1) - for j := i; j < len(path); j++ { - op := s.do(&Move{ - Parent: node, - Meta: Meta{ - Time: s.timestamp(d.Position, d.Size), - Items: []KeyValue{{Key: attr, Value: []byte(path[j])}}, - }, - Child: s.findSpareID(), - }) - lm[j-i] = op.Move - node = op.Child - s.operations = append(s.operations, op) - } - - mCopy := slices.Clone(m) - op := s.do(&Move{ - Parent: node, - Meta: Meta{ - Time: s.timestamp(d.Position, d.Size), - Items: mCopy, - }, - Child: s.findSpareID(), - }) - s.operations = append(s.operations, op) - lm[len(lm)-1] = op.Move - return lm, nil -} - -// TreeApply implements the Forest interface. -func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, op *Move, _ bool) error { - fullID := cnr.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - s = newMemoryTree() - f.treeMap[fullID] = s - } - - return s.Apply(op) -} - -func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error { - for _, op := range ops { - if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil { - return err - } - } - return nil -} - -func (f *memoryForest) Init(context.Context) error { - return nil -} - -func (f *memoryForest) Open(context.Context, mode.Mode) error { - return nil -} - -func (f *memoryForest) SetMode(context.Context, mode.Mode) error { - return nil -} - -func (f *memoryForest) Close(context.Context) error { - return nil -} -func (f *memoryForest) SetParentID(string) {} - -// TreeGetByPath implements the Forest interface. -func (f *memoryForest) TreeGetByPath(_ context.Context, cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { - if !isAttributeInternal(attr) { - return nil, ErrNotPathAttribute - } - - fullID := cid.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - return nil, ErrTreeNotFound - } - - return s.getByPath(attr, path, latest), nil -} - -// TreeGetMeta implements the Forest interface. -func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) { - fullID := cid.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - return Meta{}, 0, ErrTreeNotFound - } - - return s.infoMap[nodeID].Meta, s.infoMap[nodeID].Parent, nil -} - -// TreeSortedByFilename implements the Forest interface. 
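-// Unlike the bbolt implementation there is no sorted index to scan: the
-// children are collected, filtered to those that carry AttributeFilename,
-// sorted in memory and then cut at the cursor position.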
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { - fullID := cid.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - return nil, start, ErrTreeNotFound - } - if count == 0 { - return nil, start, nil - } - - var res []NodeInfo - - for _, nodeID := range nodeIDs { - children := s.getChildren(nodeID) - for _, childID := range children { - var found bool - for _, kv := range s.infoMap[childID].Meta.Items { - if kv.Key == AttributeFilename { - found = true - break - } - } - if !found { - continue - } - res = append(res, NodeInfo{ - ID: childID, - Meta: s.infoMap[childID].Meta, - ParentID: s.infoMap[childID].Parent, - }) - } - } - if len(res) == 0 { - return nil, start, nil - } - - sortByFilename(res) - - r := mergeNodeInfos(res) - for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { - finish := min(len(res), i+count) - last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], NewCursor(last, 0), nil - } - } - last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, NewCursor(last, 0), nil -} - -// TreeGetChildren implements the Forest interface. -func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]NodeInfo, error) { - fullID := cid.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - return nil, ErrTreeNotFound - } - - children := s.getChildren(nodeID) - res := make([]NodeInfo, 0, len(children)) - for _, childID := range children { - res = append(res, NodeInfo{ - ID: childID, - Meta: s.infoMap[childID].Meta, - ParentID: s.infoMap[childID].Parent, - }) - } - return res, nil -} - -// TreeGetOpLog implements the pilorama.Forest interface. -func (f *memoryForest) TreeGetOpLog(_ context.Context, cid cid.ID, treeID string, height uint64) (Move, error) { - fullID := cid.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - return Move{}, ErrTreeNotFound - } - - n := sort.Search(len(s.operations), func(i int) bool { - return s.operations[i].Time >= height - }) - if n == len(s.operations) { - return Move{}, nil - } - return s.operations[n].Move, nil -} - -// TreeDrop implements the pilorama.Forest interface. -func (f *memoryForest) TreeDrop(_ context.Context, cid cid.ID, treeID string) error { - cidStr := cid.String() - if treeID == "" { - for k := range f.treeMap { - if strings.HasPrefix(k, cidStr) { - delete(f.treeMap, k) - } - } - } else { - fullID := cidStr + "/" + treeID - _, ok := f.treeMap[fullID] - if !ok { - return ErrTreeNotFound - } - delete(f.treeMap, fullID) - } - return nil -} - -// TreeList implements the pilorama.Forest interface. -func (f *memoryForest) TreeList(_ context.Context, cid cid.ID) ([]string, error) { - var res []string - cidStr := cid.EncodeToString() - - for k := range f.treeMap { - cidAndTree := strings.Split(k, "/") - if cidAndTree[0] != cidStr { - continue - } - - res = append(res, cidAndTree[1]) - } - - return res, nil -} - -func (f *memoryForest) TreeHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) { - fullID := cid.EncodeToString() + "/" + treeID - tree, ok := f.treeMap[fullID] - if !ok { - return 0, ErrTreeNotFound - } - return tree.operations[len(tree.operations)-1].Time, nil -} - -// TreeExists implements the pilorama.Forest interface. 
-func (f *memoryForest) TreeExists(_ context.Context, cid cid.ID, treeID string) (bool, error) { - fullID := cid.EncodeToString() + "/" + treeID - _, ok := f.treeMap[fullID] - return ok, nil -} - -// TreeUpdateLastSyncHeight implements the pilorama.Forest interface. -func (f *memoryForest) TreeUpdateLastSyncHeight(_ context.Context, cid cid.ID, treeID string, height uint64) error { - fullID := cid.EncodeToString() + "/" + treeID - t, ok := f.treeMap[fullID] - if !ok { - return ErrTreeNotFound - } - t.syncHeight = height - return nil -} - -// TreeLastSyncHeight implements the pilorama.Forest interface. -func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) { - fullID := cid.EncodeToString() + "/" + treeID - t, ok := f.treeMap[fullID] - if !ok { - return 0, ErrTreeNotFound - } - return t.syncHeight, nil -} - -// TreeListTrees implements Forest. -func (f *memoryForest) TreeListTrees(_ context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) { - batchSize := prm.BatchSize - if batchSize <= 0 { - batchSize = treeListTreesBatchSizeDefault - } - tmpSlice := make([]string, 0, len(f.treeMap)) - for k := range f.treeMap { - tmpSlice = append(tmpSlice, k) - } - sort.Strings(tmpSlice) - var idx int - if len(prm.NextPageToken) > 0 { - last := string(prm.NextPageToken) - idx, _ = sort.Find(len(tmpSlice), func(i int) int { - return -1 * strings.Compare(tmpSlice[i], last) - }) - if idx == len(tmpSlice) { - return &TreeListTreesResult{}, nil - } - if tmpSlice[idx] == last { - idx++ - } - } - - var result TreeListTreesResult - for idx < len(tmpSlice) { - cidAndTree := strings.Split(tmpSlice[idx], "/") - if len(cidAndTree) != 2 { - return nil, errInvalidKeyFormat - } - var contID cid.ID - if err := contID.DecodeString(cidAndTree[0]); err != nil { - return nil, fmt.Errorf("invalid format: %w", err) - } - - result.Items = append(result.Items, ContainerIDTreeID{ - CID: contID, - TreeID: cidAndTree[1], - }) - - if len(result.Items) == batchSize { - result.NextPageToken = []byte(tmpSlice[idx]) - break - } - idx++ - } - return &result, nil -} - -// TreeApplyStream implements ForestStorage. 
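-// Operations are applied one by one as they arrive; the call returns when
-// source is closed or ctx is cancelled, mirroring the bbolt version but
-// without transaction batching.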
-func (f *memoryForest) TreeApplyStream(ctx context.Context, cnr cid.ID, treeID string, source <-chan *Move) error { - fullID := cnr.String() + "/" + treeID - s, ok := f.treeMap[fullID] - if !ok { - s = newMemoryTree() - f.treeMap[fullID] = s - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case m, ok := <-source: - if !ok { - return nil - } - if e := s.Apply(m); e != nil { - return e - } - } - } -} diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go deleted file mode 100644 index 844084c55..000000000 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ /dev/null @@ -1,1513 +0,0 @@ -package pilorama - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - mrand "math/rand" - "path/filepath" - "slices" - "strconv" - "strings" - "sync" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/google/uuid" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -var providers = []struct { - name string - construct func(t testing.TB, opts ...Option) ForestStorage -}{ - {"inmemory", func(t testing.TB, _ ...Option) ForestStorage { - f := NewMemoryForest() - require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init(context.Background())) - return f - }}, - {"bbolt", func(t testing.TB, opts ...Option) ForestStorage { - f := NewBoltForest( - append([]Option{ - WithPath(filepath.Join(t.TempDir(), "test.db")), - WithMaxBatchSize(1), - }, opts...)...) - require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init(context.Background())) - return f - }}, -} - -func testMeta(t *testing.T, f Forest, cid cidSDK.ID, treeID string, nodeID, parentID Node, expected Meta) { - actualMeta, actualParent, err := f.TreeGetMeta(context.Background(), cid, treeID, nodeID) - require.NoError(t, err) - require.Equal(t, parentID, actualParent) - require.Equal(t, expected, actualMeta) -} - -func TestForest_TreeMove(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeMove(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeMove(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - - meta := []KeyValue{ - {Key: AttributeVersion, Value: []byte("XXX")}, - {Key: AttributeFilename, Value: []byte("file.txt")}, - } - lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) - require.NoError(t, err) - require.Equal(t, 3, len(lm)) - - nodeID := lm[2].Child - t.Run("invalid descriptor", func(t *testing.T) { - _, err = s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, &Move{ - Parent: lm[1].Child, - Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, - Child: nodeID, - }) - require.ErrorIs(t, err, ErrInvalidCIDDescriptor) - }) - t.Run("same parent, update meta", func(t *testing.T) { - res, err := s.TreeMove(context.Background(), d, treeID, &Move{ - Parent: lm[1].Child, - Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, - Child: nodeID, - }) - require.NoError(t, err) 
- require.Equal(t, res.Child, nodeID) - - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) - require.NoError(t, err) - require.ElementsMatch(t, []Node{nodeID}, nodes) - }) - t.Run("different parent", func(t *testing.T) { - res, err := s.TreeMove(context.Background(), d, treeID, &Move{ - Parent: RootID, - Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, - Child: nodeID, - }) - require.NoError(t, err) - require.Equal(t, res.Child, nodeID) - - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) - require.NoError(t, err) - require.True(t, len(nodes) == 0) - - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false) - require.NoError(t, err) - require.ElementsMatch(t, []Node{nodeID}, nodes) - }) -} - -func TestMemoryForest_TreeGetChildren(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeGetChildren(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeGetChildren(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - - treeAdd := func(t *testing.T, child, parent Node) { - _, err := s.TreeMove(context.Background(), d, treeID, &Move{ - Parent: parent, - Child: child, - }) - require.NoError(t, err) - } - - // 0 - // |- 10 - // | |- 3 - // | |- 6 - // | |- 11 - // |- 2 - // |- 7 - treeAdd(t, 10, 0) - treeAdd(t, 3, 10) - treeAdd(t, 6, 10) - treeAdd(t, 11, 6) - treeAdd(t, 2, 0) - treeAdd(t, 7, 0) - - testGetChildren := func(t *testing.T, nodeID Node, expected []NodeInfo) { - actual, err := s.TreeGetChildren(context.Background(), cid, treeID, nodeID) - require.NoError(t, err) - require.ElementsMatch(t, expected, actual) - } - - testGetChildren(t, 0, []NodeInfo{ - {ID: 10, Meta: Meta{Time: 1, Items: []KeyValue{}}}, - {ID: 2, Meta: Meta{Time: 5, Items: []KeyValue{}}}, - {ID: 7, Meta: Meta{Time: 6, Items: []KeyValue{}}}, - }) - testGetChildren(t, 10, []NodeInfo{ - {ID: 3, ParentID: 10, Meta: Meta{Time: 2, Items: []KeyValue{}}}, - {ID: 6, ParentID: 10, Meta: Meta{Time: 3, Items: []KeyValue{}}}, - }) - testGetChildren(t, 3, nil) - testGetChildren(t, 6, []NodeInfo{{ID: 11, ParentID: 6, Meta: Meta{Time: 4, Items: []KeyValue{}}}}) - testGetChildren(t, 11, nil) - testGetChildren(t, 2, nil) - testGetChildren(t, 7, nil) - t.Run("missing node", func(t *testing.T) { - testGetChildren(t, 42, nil) - }) - t.Run("missing tree", func(t *testing.T) { - _, err := s.TreeGetChildren(context.Background(), cid, treeID+"123", 0) - require.ErrorIs(t, err, ErrTreeNotFound) - }) -} - -func BenchmarkForestSortedIteration(b *testing.B) { - for i := range providers { - if providers[i].name == "inmemory" { - continue - } - - cnr := cidtest.ID() - treeID := "version" - f := providers[i].construct(b) - - const total = 100_000 - d := CIDDescriptor{cnr, 0, 1} - for i := range total { - u, err := uuid.NewRandom() - if err != nil { - b.FailNow() - } - - _, err = f.TreeMove(context.Background(), d, treeID, &Move{ - Parent: RootID, - Child: RootID + Node(i+1), - Meta: Meta{ - Time: Timestamp(i + 1), - Items: []KeyValue{{ - Key: AttributeFilename, Value: []byte(u.String()), - }}, - }, - }) - if err != nil { - b.FailNow() - } - } - - b.Run(providers[i].name+",root", func(b *testing.B) { - for range b.N { - 
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100) - if err != nil || len(res) != 100 { - b.Fatalf("err %v, count %d", err, len(res)) - } - } - }) - b.Run(providers[i].name+",leaf", func(b *testing.B) { - for range b.N { - res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100) - if err != nil || len(res) != 0 { - b.FailNow() - } - } - }) - } -} - -// The issue which we call "BugWithSkip" is easiest to understand when filenames are -// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved. -// The bug happens when we switch between length during listing. -// Thus this test contains numbers from 1 to 2000 and batch size of size 10. -func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - treeAdd := func(t *testing.T, ts int, filename string) { - _, err := s.TreeMove(context.Background(), d, treeID, &Move{ - Child: RootID + uint64(ts), - Parent: RootID, - Meta: Meta{ - Time: Timestamp(ts), - Items: []KeyValue{ - {Key: AttributeFilename, Value: []byte(filename)}, - }, - }, - }) - require.NoError(t, err) - } - - const count = 2000 - treeAdd(t, 1, "") - for i := 1; i < count; i++ { - treeAdd(t, i+1, strconv.Itoa(i+1)) - } - - var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { - res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) - require.NoError(t, err) - result = append(result, res...) - return cursor - } - - const batchSize = 10 - last := treeAppend(t, nil, batchSize) - for i := 1; i < count/batchSize; i++ { - last = treeAppend(t, last, batchSize) - } - require.Len(t, result, count) - require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int { - filenameA := findAttr(a.Meta, AttributeFilename) - filenameB := findAttr(b.Meta, AttributeFilename) - return bytes.Compare(filenameA, filenameB) - })) -} - -func TestForest_TreeSortedIteration(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeSortedIteration(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - treeAdd := func(t *testing.T, ts int, filename string) { - _, err := s.TreeMove(context.Background(), d, treeID, &Move{ - Child: RootID + uint64(ts), - Parent: RootID, - Meta: Meta{ - Time: Timestamp(ts), - Items: []KeyValue{ - {Key: AttributeFilename, Value: []byte(filename)}, - }, - }, - }) - require.NoError(t, err) - } - - const count = 9 - treeAdd(t, 1, "") - for i := 1; i < count; i++ { - treeAdd(t, i+1, strconv.Itoa(i+1)) - } - - var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { - res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) - require.NoError(t, err) - result = append(result, res...) 
- return cursor - } - - last := treeAppend(t, nil, 2) - last = treeAppend(t, last, 3) - last = treeAppend(t, last, 0) - last = treeAppend(t, last, 1) - _ = treeAppend(t, last, 10) - - require.Len(t, result, count) - for i := range result { - require.Equal(t, MultiNode{RootID + uint64(i+1)}, result[i].Children) - if i == 0 { - require.Equal(t, "", string(findAttr(result[i].Meta, AttributeFilename))) - } else { - require.Equal(t, strconv.Itoa(RootID+i+1), string(findAttr(result[i].Meta, AttributeFilename))) - } - } -} - -func TestForest_TreeSortedFilename(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeSortedByFilename(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - const controlAttr = "control_attr" - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - - treeAddByPath := func(t *testing.T, filename string) { - path := strings.Split(filename, "/") - _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, path[:len(path)-1], - []KeyValue{ - {Key: AttributeFilename, Value: []byte(path[len(path)-1])}, - {Key: controlAttr, Value: []byte(filename)}, - }, - ) - require.NoError(t, err) - } - - expectAttributes := func(t *testing.T, attr string, expected []string, res []MultiNodeInfo) { - require.Equal(t, len(expected), len(res)) - - actual := make([]string, len(res)) - for i := range actual { - actual[i] = string(findAttr(res[i].Meta, attr)) - } - require.Equal(t, expected, actual) - } - - items := []string{ - "a/bbb/ccc", - "a/bbb/xxx", - "a/bbb/z", - "b/bbb/ccc", - "b/xxx/z", - "c", - } - - // Ensure we do not depend on insertion order in any way. 
- mrand.Shuffle(len(items), func(i, j int) { - items[i], items[j] = items[j], items[i] - }) - for i := range items { - treeAddByPath(t, items[i]) - } - - getChildren := func(t *testing.T, id MultiNode) []MultiNodeInfo { - res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, nil, len(items)) - require.NoError(t, err) - return res - } - - res := getChildren(t, MultiNode{RootID}) - expectAttributes(t, AttributeFilename, []string{"a", "b", "c"}, res) - expectAttributes(t, controlAttr, []string{"", "", "c"}, res) - - { - ra := getChildren(t, res[0].Children) - expectAttributes(t, AttributeFilename, []string{"bbb"}, ra) - expectAttributes(t, controlAttr, []string{""}, ra) - - rabbb := getChildren(t, ra[0].Children) - expectAttributes(t, AttributeFilename, []string{"ccc", "xxx", "z"}, rabbb) - expectAttributes(t, controlAttr, []string{"a/bbb/ccc", "a/bbb/xxx", "a/bbb/z"}, rabbb) - } - { - rb := getChildren(t, res[1].Children) - expectAttributes(t, AttributeFilename, []string{"bbb", "xxx"}, rb) - expectAttributes(t, controlAttr, []string{"", ""}, rb) - - rbbbb := getChildren(t, rb[0].Children) - expectAttributes(t, AttributeFilename, []string{"ccc"}, rbbbb) - expectAttributes(t, controlAttr, []string{"b/bbb/ccc"}, rbbbb) - - rbxxx := getChildren(t, rb[1].Children) - expectAttributes(t, AttributeFilename, []string{"z"}, rbxxx) - expectAttributes(t, controlAttr, []string{"b/xxx/z"}, rbxxx) - } - { - rc := getChildren(t, res[2].Children) - require.Len(t, rc, 0) - } -} - -func TestForest_TreeDrop(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeDrop(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeDrop(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - const cidsSize = 3 - var cids [cidsSize]cidSDK.ID - - for i := range cids { - cids[i] = cidtest.ID() - } - cid := cids[0] - - t.Run("return nil if not found", func(t *testing.T) { - require.ErrorIs(t, s.TreeDrop(context.Background(), cid, "123"), ErrTreeNotFound) - }) - - require.NoError(t, s.TreeDrop(context.Background(), cid, "")) - - trees := []string{"tree1", "tree2"} - var descs [cidsSize]CIDDescriptor - for i := range descs { - descs[i] = CIDDescriptor{cids[i], 0, 1} - } - d := descs[0] - for i := range trees { - _, err := s.TreeAddByPath(context.Background(), d, trees[i], AttributeFilename, []string{"path"}, - []KeyValue{{Key: "TreeName", Value: []byte(trees[i])}}) - require.NoError(t, err) - } - - err := s.TreeDrop(context.Background(), cid, trees[0]) - require.NoError(t, err) - - _, err = s.TreeGetByPath(context.Background(), cid, trees[0], AttributeFilename, []string{"path"}, true) - require.ErrorIs(t, err, ErrTreeNotFound) - - _, err = s.TreeGetByPath(context.Background(), cid, trees[1], AttributeFilename, []string{"path"}, true) - require.NoError(t, err) - - for j := range descs { - for i := range trees { - _, err := s.TreeAddByPath(context.Background(), descs[j], trees[i], AttributeFilename, []string{"path"}, - []KeyValue{{Key: "TreeName", Value: []byte(trees[i])}}) - require.NoError(t, err) - } - } - list, err := s.TreeList(context.Background(), cid) - require.NoError(t, err) - require.NotEmpty(t, list) - - require.NoError(t, s.TreeDrop(context.Background(), cid, "")) - - list, err = s.TreeList(context.Background(), cid) - require.NoError(t, err) - require.Empty(t, list) - - for j := 1; j < len(cids); j++ { - list, err = s.TreeList(context.Background(), cids[j]) - require.NoError(t, 
err) - require.Equal(t, len(list), len(trees)) - } -} - -func TestForest_TreeAdd(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeAdd(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeAdd(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - - meta := []KeyValue{ - {Key: AttributeVersion, Value: []byte("XXX")}, - {Key: AttributeFilename, Value: []byte("file.txt")}, - } - m := &Move{ - Parent: RootID, - Child: RootID, - Meta: Meta{Items: meta}, - } - - t.Run("invalid descriptor", func(t *testing.T) { - _, err := s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, m) - require.ErrorIs(t, err, ErrInvalidCIDDescriptor) - }) - - lm, err := s.TreeMove(context.Background(), d, treeID, m) - require.NoError(t, err) - - testMeta(t, s, cid, treeID, lm.Child, lm.Parent, Meta{Time: lm.Time, Items: meta}) - - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false) - require.NoError(t, err) - require.ElementsMatch(t, []Node{lm.Child}, nodes) - - t.Run("other trees are unaffected", func(t *testing.T) { - _, err := s.TreeGetByPath(context.Background(), cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false) - require.ErrorIs(t, err, ErrTreeNotFound) - - _, _, err = s.TreeGetMeta(context.Background(), cid, treeID+"123", 0) - require.ErrorIs(t, err, ErrTreeNotFound) - }) -} - -func TestForest_TreeAddByPath(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeAddByPath(t, providers[i].construct(t)) - }) - } -} - -func testForestTreeAddByPath(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cid := cidtest.ID() - d := CIDDescriptor{cid, 0, 1} - treeID := "version" - - meta := []KeyValue{ - {Key: AttributeVersion, Value: []byte("XXX")}, - {Key: AttributeFilename, Value: []byte("file.txt")}, - } - - t.Run("invalid descriptor", func(t *testing.T) { - _, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta) - require.ErrorIs(t, err, ErrInvalidCIDDescriptor) - }) - t.Run("invalid attribute", func(t *testing.T) { - _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeVersion, []string{"yyy"}, meta) - require.ErrorIs(t, err, ErrNotPathAttribute) - }) - - lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) - require.NoError(t, err) - require.Equal(t, 3, len(lm)) - testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("path")}}}) - testMeta(t, s, cid, treeID, lm[1].Child, lm[1].Parent, Meta{Time: lm[1].Time, Items: []KeyValue{{AttributeFilename, []byte("to")}}}) - - firstID := lm[2].Child - testMeta(t, s, cid, treeID, firstID, lm[2].Parent, Meta{Time: lm[2].Time, Items: meta}) - - // TreeAddByPath must return operations in increasing time order. 
- require.True(t, lm[0].Time < lm[1].Time) - require.True(t, lm[1].Time < lm[2].Time) - - meta[0].Value = []byte("YYY") - lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) - require.NoError(t, err) - require.Equal(t, 1, len(lm)) - - secondID := lm[0].Child - testMeta(t, s, cid, treeID, secondID, lm[0].Parent, Meta{Time: lm[0].Time, Items: meta}) - - t.Run("get versions", func(t *testing.T) { - // All versions. - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) - require.NoError(t, err) - require.ElementsMatch(t, []Node{firstID, secondID}, nodes) - - // Latest version. - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true) - require.NoError(t, err) - require.Equal(t, []Node{secondID}, nodes) - }) - - meta[0].Value = []byte("ZZZ") - meta[1].Value = []byte("cat.jpg") - lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "dir"}, meta) - require.NoError(t, err) - require.Equal(t, 2, len(lm)) - testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("dir")}}}) - testMeta(t, s, cid, treeID, lm[1].Child, lm[1].Parent, Meta{Time: lm[1].Time, Items: meta}) - - t.Run("create internal nodes", func(t *testing.T) { - meta[0].Value = []byte("SomeValue") - meta[1].Value = []byte("another") - lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path"}, meta) - require.NoError(t, err) - require.Equal(t, 1, len(lm)) - - oldMove := lm[0] - - meta[0].Value = []byte("Leaf") - meta[1].Value = []byte("file.txt") - lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "another"}, meta) - require.NoError(t, err) - require.Equal(t, 2, len(lm)) - - testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, - Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("another")}}}) - testMeta(t, s, cid, treeID, lm[1].Child, lm[1].Parent, Meta{Time: lm[1].Time, Items: meta}) - - require.NotEqual(t, lm[0].Child, oldMove.Child) - testMeta(t, s, cid, treeID, oldMove.Child, oldMove.Parent, - Meta{Time: oldMove.Time, Items: []KeyValue{ - {AttributeVersion, []byte("SomeValue")}, - {AttributeFilename, []byte("another")}, - }}) - - t.Run("get by path", func(t *testing.T) { - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false) - require.NoError(t, err) - require.Equal(t, 2, len(nodes)) - require.ElementsMatch(t, []Node{lm[0].Child, oldMove.Child}, nodes) - - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false) - require.NoError(t, err) - require.Equal(t, 1, len(nodes)) - require.Equal(t, lm[1].Child, nodes[0]) - }) - }) - - t.Run("empty component", func(t *testing.T) { - meta := []KeyValue{ - {Key: AttributeVersion, Value: []byte("XXX")}, - {Key: AttributeFilename, Value: []byte{}}, - } - lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) - require.NoError(t, err) - require.Equal(t, 1, len(lm)) - - nodes, err := s.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false) - require.NoError(t, err) - require.Equal(t, 1, len(nodes)) - require.Equal(t, lm[0].Child, nodes[0]) - }) -} - -func 
TestForest_Apply(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeApply(t, providers[i].construct) - }) - } -} - -func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) { - cid := cidtest.ID() - treeID := "version" - - testApply := func(t *testing.T, s Forest, child, parent Node, meta Meta) { - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{ - Child: child, - Parent: parent, - Meta: meta, - }, false)) - } - - t.Run("add a child, then insert a parent removal", func(t *testing.T) { - s := constructor(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}}) - - meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}} - testApply(t, s, 11, 10, meta) - testMeta(t, s, cid, treeID, 11, 10, meta) - - testApply(t, s, 10, TrashID, Meta{Time: 2, Items: []KeyValue{{"parent", []byte{2}}}}) - testMeta(t, s, cid, treeID, 11, 10, meta) - }) - t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) { - s := constructor(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}} - testApply(t, s, 11, 10, meta) - testMeta(t, s, cid, treeID, 11, 10, meta) - - testApply(t, s, 10, 0, Meta{Time: 2, Items: []KeyValue{{"grand", []byte{1}}}}) - testMeta(t, s, cid, treeID, 11, 10, meta) - }) -} - -func TestForest_ApplySameOperation(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - parallel := providers[i].name != "inmemory" - testForestApplySameOperation(t, providers[i].construct, parallel) - }) - } -} - -func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) { - cid := cidtest.ID() - treeID := "version" - - batchSize := 3 - ctx := context.Background() - errG, _ := errgroup.WithContext(ctx) - if !parallel { - batchSize = 1 - errG.SetLimit(1) - } - - meta := []Meta{ - {Time: 1, Items: []KeyValue{{AttributeFilename, []byte("1")}, {"attr", []byte{1}}}}, - {Time: 2, Items: []KeyValue{{AttributeFilename, []byte("2")}, {"attr", []byte{1}}}}, - {Time: 3, Items: []KeyValue{{AttributeFilename, []byte("3")}, {"attr", []byte{1}}}}, - } - logs := []Move{ - { - Child: 1, - Parent: RootID, - Meta: meta[0], - }, - { - Child: 2, - Parent: 1, - Meta: meta[1], - }, - { - Child: 1, - Parent: 2, - Meta: meta[2], - }, - } - - check := func(t *testing.T, s Forest) { - testMeta(t, s, cid, treeID, 1, RootID, meta[0]) - testMeta(t, s, cid, treeID, 2, 1, meta[1]) - - nodes, err := s.TreeGetChildren(ctx, cid, treeID, RootID) - require.NoError(t, err) - require.Equal(t, []NodeInfo{{ID: 1, ParentID: RootID, Meta: meta[0]}}, nodes) - - nodes, err = s.TreeGetChildren(ctx, cid, treeID, 1) - require.NoError(t, err) - require.Equal(t, []NodeInfo{{ID: 2, ParentID: 1, Meta: meta[1]}}, nodes) - } - - t.Run("expected", func(t *testing.T) { - s := constructor(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - for i := range logs { - require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false)) - } - check(t, s) - }) - - s := constructor(t, WithMaxBatchSize(batchSize)) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false)) - for range batchSize { - errG.Go(func() error { - return 
s.TreeApply(ctx, cid, treeID, &logs[2], false) - }) - } - require.NoError(t, errG.Wait()) - require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[1], false)) - check(t, s) -} - -func TestForest_GetOpLog(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeGetOpLog(t, providers[i].construct) - }) - } -} - -func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) { - cid := cidtest.ID() - treeID := "version" - logs := []Move{ - { - Meta: Meta{Time: 4, Items: []KeyValue{{"grand", []byte{1}}}}, - Child: 1, - }, - { - Meta: Meta{Time: 5, Items: []KeyValue{{"second", []byte{1, 2, 3}}}}, - Child: 4, - }, - { - Parent: 10, - Meta: Meta{Time: 256 + 4, Items: []KeyValue{}}, // make sure keys are big-endian - Child: 11, - }, - } - - s := constructor(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - t.Run("empty log, no panic", func(t *testing.T) { - _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0) - require.ErrorIs(t, err, ErrTreeNotFound) - }) - - for i := range logs { - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &logs[i], false)) - } - - testGetOpLog := func(t *testing.T, height uint64, m Move) { - lm, err := s.TreeGetOpLog(context.Background(), cid, treeID, height) - require.NoError(t, err) - require.Equal(t, m, lm) - } - - testGetOpLog(t, 0, logs[0]) - testGetOpLog(t, 4, logs[0]) - testGetOpLog(t, 5, logs[1]) - testGetOpLog(t, 6, logs[2]) - testGetOpLog(t, 260, logs[2]) - t.Run("missing entry", func(t *testing.T) { - testGetOpLog(t, 261, Move{}) - }) - t.Run("missing tree", func(t *testing.T) { - _, err := s.TreeGetOpLog(context.Background(), cid, treeID+"123", 4) - require.ErrorIs(t, err, ErrTreeNotFound) - }) -} - -func TestForest_TreeExists(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeExists(t, providers[i].construct) - }) - } -} - -func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) { - s := constructor(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) { - actual, err := s.TreeExists(context.Background(), cid, treeID) - require.NoError(t, err) - require.Equal(t, expected, actual) - } - - cid := cidtest.ID() - treeID := "version" - - t.Run("empty state, no panic", func(t *testing.T) { - checkExists(t, false, cid, treeID) - }) - - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{Meta: Meta{Time: 11}, Parent: 0, Child: 1}, false)) - checkExists(t, true, cid, treeID) - - height, err := s.TreeHeight(context.Background(), cid, treeID) - require.NoError(t, err) - require.EqualValues(t, 11, height) - - checkExists(t, false, cidtest.ID(), treeID) // different CID, same tree - - _, err = s.TreeHeight(context.Background(), cidtest.ID(), treeID) - require.ErrorIs(t, err, ErrTreeNotFound) - - checkExists(t, false, cid, "another tree") // same CID, different tree - - t.Run("can be removed", func(t *testing.T) { - require.NoError(t, s.TreeDrop(context.Background(), cid, treeID)) - checkExists(t, false, cid, treeID) - }) -} - -func TestApplyTricky1(t *testing.T) { - ops := []Move{ - { - Parent: 1, - Meta: Meta{Time: 100}, - Child: 2, - }, - { - Parent: 0, - Meta: Meta{Time: 80}, - Child: 1, - }, - } - - expected := []struct{ child, parent Node }{ - {1, 0}, - {2, 1}, - } - - treeID := "version" - cid := 
cidtest.ID() - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - for i := range ops { - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) - } - - for i := range expected { - _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child) - require.NoError(t, err) - require.Equal(t, expected[i].parent, parent) - } - }) - } -} - -func TestApplyTricky2(t *testing.T) { - // Apply operations in the reverse order and then insert an operation in the middle - // so that previous "old" parent becomes invalid. - ops := []Move{ - { - Parent: 10000, - Meta: Meta{Time: 100}, - Child: 5, - }, - { - Parent: 3, - Meta: Meta{Time: 80}, - Child: 5, - }, - { - Parent: 5, - Meta: Meta{Time: 40}, - Child: 3, - }, - { - Parent: 5, - Meta: Meta{Time: 60}, - Child: 1, - }, - { - Parent: 1, - Meta: Meta{Time: 90}, - Child: 2, - }, - { - Parent: 0, - Meta: Meta{Time: 10}, - Child: 5, - }, - } - - expected := []struct{ child, parent Node }{ - {5, 10_000}, - {3, 5}, - {2, 1}, - {1, 5}, - } - - treeID := "version" - cid := cidtest.ID() - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close(context.Background())) }() - - for i := range ops { - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) - } - - for i := range expected { - _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child) - require.NoError(t, err) - require.Equal(t, expected[i].parent, parent) - } - }) - } -} - -func TestForest_ApplyRandom(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeApplyRandom(t, providers[i].construct) - }) - } -} - -func TestForest_ParallelApply(t *testing.T) { - for i := range providers { - if providers[i].name == "inmemory" { - continue - } - t.Run(providers[i].name, func(t *testing.T) { - testForestTreeParallelApply(t, providers[i].construct, 8, 128, 10) - }) - } -} - -// prepareRandomTree creates a random sequence of operation and applies them to tree. -// The operations are guaranteed to be applied and returned sorted by `Time`. 
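Note: the tricky-apply and random-apply tests above all exercise the same CRDT property: replaying one set of Move operations in any order must converge to the same tree. The core pattern is a permuted replay checked against a sequentially built baseline; a minimal sketch (illustrative only; `s`, `ctx`, `cid`, `treeID`, `ops`, `expected` and `nodeCount` are assumed from the surrounding tests, and `Perm` is math/rand's):

```go
// Replay the same operation set in a random order; the resulting
// forest must match the sequentially built baseline.
for _, idx := range r.Perm(len(ops)) {
	require.NoError(t, s.TreeApply(ctx, cid, treeID, &ops[idx], false))
}
compareForests(t, expected, s, cid, treeID, nodeCount)
```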
-func prepareRandomTree(nodeCount, opCount int) []Move { - ops := make([]Move, nodeCount+opCount) - for i := range nodeCount { - ops[i] = Move{ - Parent: 0, - Meta: Meta{ - Time: Timestamp(i), - Items: []KeyValue{ - {Key: AttributeFilename, Value: []byte(strconv.Itoa(i))}, - {Value: make([]byte, 10)}, - }, - }, - Child: uint64(i) + 1, - } - rand.Read(ops[i].Meta.Items[1].Value) - } - - r := mrand.New(mrand.NewSource(time.Now().Unix())) - for i := nodeCount; i < len(ops); i++ { - ops[i] = Move{ - Parent: r.Uint64() % uint64(nodeCount+12), - Meta: Meta{ - Time: Timestamp(i + nodeCount), - Items: []KeyValue{ - {Key: AttributeFilename, Value: []byte(strconv.Itoa(i))}, - {Value: make([]byte, 10)}, - }, - }, - Child: r.Uint64() % uint64(nodeCount+10), - } - if r.Uint32()%5 == 0 { - ops[i].Parent = TrashID - } - rand.Read(ops[i].Meta.Items[1].Value) - } - - return ops -} - -func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) { - for i := range uint64(nodeCount) { - expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i) - require.NoError(t, err) - actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i) - require.NoError(t, err) - require.Equal(t, expectedParent, actualParent, "node id: %d", i) - require.Equal(t, expectedMeta, actualMeta, "node id: %d", i) - - if ma, ok := actual.(*memoryForest); ok { - me := expected.(*memoryForest) - require.Equal(t, len(me.treeMap), len(ma.treeMap)) - - for k, sa := range ma.treeMap { - se, ok := me.treeMap[k] - require.True(t, ok) - require.Equal(t, se.operations, sa.operations) - require.Equal(t, se.infoMap, sa.infoMap) - } - require.Equal(t, expected, actual, i) - } - } -} - -func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) { - r := mrand.New(mrand.NewSource(42)) - - const nodeCount = 5 - - ops := prepareRandomTree(nodeCount, opCount) - - cid := cidtest.ID() - treeID := "version" - - expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close(context.Background())) }() - - for i := range ops { - require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) - } - - for range iterCount { - // Shuffle random operations, leave initialization in place. 
- r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) - - actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true)) - wg := new(sync.WaitGroup) - ch := make(chan *Move) - for range batchSize { - wg.Add(1) - go func() { - defer wg.Done() - for op := range ch { - require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, op, false)) - } - }() - } - - for i := range ops { - ch <- &ops[i] - } - close(ch) - wg.Wait() - - compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close(context.Background())) - } -} - -func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) { - r := mrand.New(mrand.NewSource(42)) - - const ( - nodeCount = 5 - opCount = 20 - ) - - ops := prepareRandomTree(nodeCount, opCount) - - cid := cidtest.ID() - treeID := "version" - - expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close(context.Background())) }() - - for i := range ops { - require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) - } - - const iterCount = 200 - for range iterCount { - // Shuffle random operations, leave initialization in place. - r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) - - actual := constructor(t, WithNoSync(true)) - for i := range ops { - require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false)) - } - compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close(context.Background())) - } -} - -const benchNodeCount = 1000 - -var batchSizes = []int{1, 2, 4, 8, 16, 32} - -func BenchmarkApplySequential(b *testing.B) { - for i := range providers { - if providers[i].name == "inmemory" { // memory backend is not thread-safe - continue - } - b.Run(providers[i].name, func(b *testing.B) { - for _, bs := range batchSizes { - b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { - r := mrand.New(mrand.NewSource(time.Now().Unix())) - s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close(context.Background())) }() - - benchmarkApply(b, s, func(opCount int) []Move { - ops := make([]Move, opCount) - for i := range ops { - ops[i] = Move{ - Parent: uint64(r.Intn(benchNodeCount)), - Meta: Meta{ - Time: Timestamp(i), - Items: []KeyValue{{Value: []byte{0, 1, 2, 3, 4}}}, - }, - Child: uint64(r.Intn(benchNodeCount)), - } - } - return ops - }) - }) - } - }) - } -} - -func BenchmarkApplyReorderLast(b *testing.B) { - // Group operations in a blocks of 10, order blocks in increasing timestamp order, - // and operations in a single block in reverse. 
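Note: the swap loop below reverses each 10-operation window in place as the operations are generated; because the swap fires at the first index of the next block, it appears to drag that boundary element into the reversed window as well. A cleaner standalone formulation of the intended reordering (hypothetical helper, not the benchmark code itself):

```go
import "slices"

// reverseBlocks reverses every fixed-size block of timestamps,
// e.g. blockSize=4 turns 0..7 into 3,2,1,0,7,6,5,4.
func reverseBlocks(ts []uint64, blockSize int) {
	for start := 0; start < len(ts); start += blockSize {
		end := min(start+blockSize, len(ts))
		slices.Reverse(ts[start:end])
	}
}
```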
- const blockSize = 10 - - for i := range providers { - if providers[i].name == "inmemory" { // memory backend is not thread-safe - continue - } - b.Run(providers[i].name, func(b *testing.B) { - for _, bs := range batchSizes { - b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { - r := mrand.New(mrand.NewSource(time.Now().Unix())) - s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close(context.Background())) }() - - benchmarkApply(b, s, func(opCount int) []Move { - ops := make([]Move, opCount) - for i := range ops { - ops[i] = Move{ - Parent: uint64(r.Intn(benchNodeCount)), - Meta: Meta{ - Time: Timestamp(i), - Items: []KeyValue{{Value: []byte{0, 1, 2, 3, 4}}}, - }, - Child: uint64(r.Intn(benchNodeCount)), - } - if i != 0 && i%blockSize == 0 { - for j := range blockSize / 2 { - ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j] - } - } - } - return ops - }) - }) - } - }) - } -} - -func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) { - ops := genFunc(b.N) - cid := cidtest.ID() - treeID := "version" - ch := make(chan int, b.N) - for i := range b.N { - ch <- i - } - - b.ResetTimer() - b.ReportAllocs() - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if err := s.TreeApply(context.Background(), cid, treeID, &ops[<-ch], false); err != nil { - b.Fatalf("error in `Apply`: %v", err) - } - } - }) -} - -func TestTreeGetByPath(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testTreeGetByPath(t, providers[i].construct(t)) - }) - } -} - -func testTreeGetByPath(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - cid := cidtest.ID() - treeID := "version" - - // / - // |- a (1) - // |- cat1.jpg, Version=TTT (3) - // |- b (2) - // |- cat1.jpg, Version=XXX (4) - // |- cat1.jpg, Version=YYY (5) - // |- cat2.jpg, Version=ZZZ (6) - testMove(t, s, 0, 1, 0, cid, treeID, "a", "") - testMove(t, s, 1, 2, 0, cid, treeID, "b", "") - testMove(t, s, 2, 3, 1, cid, treeID, "cat1.jpg", "TTT") - testMove(t, s, 3, 4, 2, cid, treeID, "cat1.jpg", "XXX") - testMove(t, s, 4, 5, 2, cid, treeID, "cat1.jpg", "YYY") - testMove(t, s, 5, 6, 2, cid, treeID, "cat2.jpg", "ZZZ") - - if mf, ok := s.(*memoryForest); ok { - single := mf.treeMap[cid.String()+"/"+treeID] - t.Run("test meta", func(t *testing.T) { - for i := range 6 { - require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time) - } - }) - } - - t.Run("invalid attribute", func(t *testing.T) { - _, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeVersion, []string{"", "TTT"}, false) - require.ErrorIs(t, err, ErrNotPathAttribute) - }) - - nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false) - require.NoError(t, err) - require.Equal(t, []Node{4, 5}, nodes) - - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false) - require.Equal(t, []Node{3}, nodes) - - t.Run("missing child", func(t *testing.T) { - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false) - require.True(t, len(nodes) == 0) - }) - t.Run("missing parent", func(t *testing.T) { - nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false) - require.True(t, len(nodes) == 0) - }) - t.Run("empty path", func(t *testing.T) { - nodes, err = 
s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, nil, false) - require.True(t, len(nodes) == 0) - }) -} - -func testMove(t *testing.T, s Forest, ts int, node, parent Node, cid cidSDK.ID, treeID, filename, version string) { - items := make([]KeyValue, 1, 2) - items[0] = KeyValue{AttributeFilename, []byte(filename)} - if version != "" { - items = append(items, KeyValue{AttributeVersion, []byte(version)}) - } - - require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{ - Parent: parent, - Child: node, - Meta: Meta{ - Time: uint64(ts), - Items: items, - }, - }, false)) -} - -func TestGetTrees(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testTreeGetTrees(t, providers[i].construct(t)) - }) - } -} - -func testTreeGetTrees(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close(context.Background())) }() - - cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()} - d := CIDDescriptor{Position: 0, Size: 1} - - treeIDs := make(map[cidSDK.ID][]string, len(cids)) - for i, cid := range cids { - treeIDs[cid] = []string{ - fmt.Sprintf("test1_%d", i), - fmt.Sprintf("test2_%d", i), - fmt.Sprintf("test3_%d", i), - fmt.Sprintf("1test_%d", i), - fmt.Sprintf("2test_%d", i), - fmt.Sprintf("3test_%d", i), - "", - } - } - - for _, cid := range cids { - d.CID = cid - - for _, treeID := range treeIDs[cid] { - _, err := s.TreeAddByPath(context.Background(), d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil) - require.NoError(t, err) - } - } - - for _, cid := range cids { - d.CID = cid - - trees, err := s.TreeList(context.Background(), cid) - require.NoError(t, err) - - require.ElementsMatch(t, treeIDs[cid], trees) - } -} - -func TestTreeLastSyncHeight(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testTreeLastSyncHeight(t, providers[i].construct(t)) - }) - } -} - -func testTreeLastSyncHeight(t *testing.T, f ForestStorage) { - defer func() { require.NoError(t, f.Close(context.Background())) }() - - cnr := cidtest.ID() - treeID := "someTree" - - t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) { - _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) - require.ErrorIs(t, err, ErrTreeNotFound) - - err = f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 1) - require.ErrorIs(t, err, ErrTreeNotFound) - }) - - _, err := f.TreeMove(context.Background(), CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{ - Parent: RootID, - Child: 1, - }) - require.NoError(t, err) - - h, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) - require.NoError(t, err) - require.EqualValues(t, 0, h) - - t.Run("separate storages for separate containers", func(t *testing.T) { - _, err := f.TreeLastSyncHeight(context.Background(), cidtest.ID(), treeID) - require.ErrorIs(t, err, ErrTreeNotFound) - }) - - require.NoError(t, f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 10)) - - h, err = f.TreeLastSyncHeight(context.Background(), cnr, treeID) - require.NoError(t, err) - require.EqualValues(t, 10, h) - - t.Run("removed correctly", func(t *testing.T) { - require.NoError(t, f.TreeDrop(context.Background(), cnr, treeID)) - - _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) - require.ErrorIs(t, err, ErrTreeNotFound) - }) -} - -func TestForest_ListTrees(t *testing.T) { - for i := range providers { - t.Run(providers[i].name, func(t *testing.T) { - testTreeListTrees(t, providers[i].construct) - }) - } -} 
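Note: the listing tests below revolve around page-boundary arithmetic for TreeListTrees. For reference, the token-paginated drain they rely on mirrors treeListAll, defined in interface.go further down in this diff:

```go
var prm TreeListTreesPrm
prm.BatchSize = batchSize
var result []ContainerIDTreeID
for first := true; first || len(prm.NextPageToken) > 0; first = false {
	res, err := f.TreeListTrees(ctx, prm)
	if err != nil {
		return nil, err
	}
	prm.NextPageToken = res.NextPageToken
	result = append(result, res.Items...)
}
```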
- -func testTreeListTrees(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) { - batchSize := 10 - t.Run("empty", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, 0) - }) - t.Run("count lower than batch size", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, batchSize-1) - }) - t.Run("count equals batch size", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, batchSize) - }) - t.Run("count greater than batch size", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, batchSize+1) - }) - t.Run("count equals multiplied batch size", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, 3*batchSize) - }) - t.Run("count equals multiplied batch size with addition", func(t *testing.T) { - testTreeListTreesCount(t, constructor, batchSize, 3*batchSize+batchSize/2) - }) -} - -func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, count int) { - f := constructor(t) - var expected []ContainerIDTreeID - - treeIDs := []string{"version", "system", "s", "avada kedavra"} - for i := range count { - cid := cidtest.ID() - treeID := treeIDs[i%len(treeIDs)] - expected = append(expected, ContainerIDTreeID{ - CID: cid, - TreeID: treeID, - }) - - ops := prepareRandomTree(5, 5) - for _, op := range ops { - require.NoError(t, f.TreeApply(context.Background(), cid, treeID, &op, false)) - } - } - - actual, err := treeListAll(context.Background(), f, batchSize) - require.NoError(t, err) - - require.ElementsMatch(t, expected, actual) -} diff --git a/pkg/local_object_storage/pilorama/generic_test.go b/pkg/local_object_storage/pilorama/generic_test.go deleted file mode 100644 index d6a9f333b..000000000 --- a/pkg/local_object_storage/pilorama/generic_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package pilorama - -import ( - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest" -) - -func TestGeneric(t *testing.T) { - newPilorama := func(t *testing.T) storagetest.Component { - return NewBoltForest(WithPath(filepath.Join(t.TempDir(), "pilorama"))) - } - - storagetest.TestAll(t, newPilorama) -} diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go deleted file mode 100644 index b035be1e1..000000000 --- a/pkg/local_object_storage/pilorama/heap.go +++ /dev/null @@ -1,96 +0,0 @@ -package pilorama - -import ( - "container/heap" - "slices" - "strings" -) - -type heapInfo struct { - id MultiNode - filename string -} - -type filenameHeap []heapInfo - -func (h filenameHeap) Len() int { return len(h) } -func (h filenameHeap) Less(i, j int) bool { return h[i].filename < h[j].filename } -func (h filenameHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *filenameHeap) Push(x any) { - *h = append(*h, x.(heapInfo)) -} - -func (h *filenameHeap) Pop() any { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -// fixedHeap maintains a fixed number of smallest elements started at some point. 
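Note: despite the name, the type below no longer relies on container/heap ordering; push appends and periodically sorts and truncates (heap.Init on the empty filenameHeap in newHeap is a no-op, so the Push/Pop heap methods appear vestigial). The underlying bounded top-k idea, as a self-contained sketch on plain strings:

```go
import "slices"

// smallestK keeps the k smallest strings seen so far, buffering up to
// k*5 items before each sort-and-truncate pass (the same amortization
// as amortizationMultiplier below).
func smallestK(items []string, k int) []string {
	buf := make([]string, 0, k*5)
	for _, it := range items {
		buf = append(buf, it)
		if len(buf) > k*5 {
			slices.Sort(buf)
			buf = buf[:k]
		}
	}
	slices.Sort(buf)
	return buf[:min(k, len(buf))]
}
```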
-type fixedHeap struct { - start *Cursor - sorted bool - count int - h *filenameHeap -} - -func newHeap(start *Cursor, count int) *fixedHeap { - h := new(filenameHeap) - heap.Init(h) - - return &fixedHeap{ - start: start, - count: count, - h: h, - } -} - -const amortizationMultiplier = 5 - -func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil { - if filename < h.start.GetFilename() { - return false - } else if filename == h.start.GetFilename() { - // A tree may have a lot of nodes with the same filename but different versions so that - // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call - // with the same filename. - pos := slices.Index(id, h.start.GetNode()) - if pos == -1 || pos+1 >= len(id) { - return false - } - id = id[pos+1:] - } - } - - *h.h = append(*h.h, heapInfo{id: id, filename: filename}) - h.sorted = false - - if h.h.Len() > h.count*amortizationMultiplier { - slices.SortFunc(*h.h, func(a, b heapInfo) int { - return strings.Compare(a.filename, b.filename) - }) - *h.h = (*h.h)[:h.count] - } - return true -} - -func (h *fixedHeap) pop() (heapInfo, bool) { - if !h.sorted { - slices.SortFunc(*h.h, func(a, b heapInfo) int { - return strings.Compare(a.filename, b.filename) - }) - if len(*h.h) > h.count { - *h.h = (*h.h)[:h.count] - } - h.sorted = true - } - if len(*h.h) != 0 { - info := (*h.h)[0] - *h.h = (*h.h)[1:] - return info, true - } - return heapInfo{}, false -} diff --git a/pkg/local_object_storage/pilorama/info.go b/pkg/local_object_storage/pilorama/info.go deleted file mode 100644 index 0040b9dca..000000000 --- a/pkg/local_object_storage/pilorama/info.go +++ /dev/null @@ -1,24 +0,0 @@ -package pilorama - -// Info groups the information about the pilorama. -type Info struct { - // Path contains path to the root-directory of the pilorama. - Path string - // Backend is the pilorama storage type. Either "boltdb" or "memory". - Backend string -} - -// DumpInfo implements the ForestStorage interface. -func (t *boltForest) DumpInfo() Info { - return Info{ - Path: t.path, - Backend: "boltdb", - } -} - -// DumpInfo implements the ForestStorage interface. -func (f *memoryForest) DumpInfo() Info { - return Info{ - Backend: "memory", - } -} diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go deleted file mode 100644 index 28b7faec8..000000000 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ /dev/null @@ -1,213 +0,0 @@ -package pilorama - -import ( - "cmp" - "slices" -) - -// nodeInfo couples parent and metadata. -type nodeInfo struct { - Parent Node - Meta Meta -} - -type move struct { - Move - HasOld bool - Old nodeInfo -} - -// memoryTree represents memoryTree being replicated. -type memoryTree struct { - operations []move - tree -} - -// newMemoryTree constructs new empty tree. -func newMemoryTree() *memoryTree { - return &memoryTree{ - tree: tree{ - infoMap: make(map[Node]nodeInfo), - }, - } -} - -// undo un-does op and changes s in-place. -func (s *memoryTree) undo(op *move) { - if op.HasOld { - s.infoMap[op.Child] = op.Old - } else { - delete(s.infoMap, op.Child) - } -} - -// Apply puts op in log at a proper position, re-applies all subsequent operations -// from log and changes s in-place. 
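Note: this is the classic move-operation replay for tree CRDTs: find the insertion point by timestamp, undo every later operation, apply the new one, then redo the tail. A generic sketch of the shape (illustrative only; the implementation below scans linearly instead of using sort.Search, and do/undo stand in for the package's methods):

```go
idx := sort.Search(len(log), func(i int) bool { return log[i].Time > op.Time })
for i := len(log) - 1; i >= idx; i-- {
	undo(&log[i]) // roll back everything that logically happens later
}
log = slices.Insert(log, idx, do(op)) // apply the new operation in place
for i := idx + 1; i < len(log); i++ {
	log[i] = do(&log[i].Move) // replay the tail on top of it
}
```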
-func (s *memoryTree) Apply(op *Move) error { - var index int - for index = len(s.operations); index > 0; index-- { - if s.operations[index-1].Time <= op.Time { - break - } - } - - if index == len(s.operations) { - s.operations = append(s.operations, s.do(op)) - return nil - } - - s.operations = append(s.operations[:index+1], s.operations[index:]...) - for i := len(s.operations) - 1; i > index; i-- { - s.undo(&s.operations[i]) - } - - s.operations[index] = s.do(op) - - for i := index + 1; i < len(s.operations); i++ { - s.operations[i] = s.do(&s.operations[i].Move) - } - return nil -} - -// do performs a single move operation on a tree. -func (s *memoryTree) do(op *Move) move { - m := op.Meta - if m.Items == nil { - m.Items = []KeyValue{} - } - lm := move{ - Move: Move{ - Parent: op.Parent, - Meta: m, - Child: op.Child, - }, - } - - shouldPut := !s.isAncestor(op.Child, op.Parent) - p, ok := s.infoMap[op.Child] - if ok { - lm.HasOld = true - lm.Old = p - } - - if !shouldPut { - return lm - } - - if !ok { - p.Meta.Time = op.Time - } - - p.Meta = m - p.Parent = op.Parent - s.infoMap[op.Child] = p - - return lm -} - -func (s *memoryTree) timestamp(pos, size int) Timestamp { - if len(s.operations) == 0 { - return nextTimestamp(0, uint64(pos), uint64(size)) - } - return nextTimestamp(s.operations[len(s.operations)-1].Time, uint64(pos), uint64(size)) -} - -func (s *memoryTree) findSpareID() Node { - id := uint64(1) - for _, ok := s.infoMap[id]; ok; _, ok = s.infoMap[id] { - id++ - } - return id -} - -// tree is a mapping from the child nodes to their parent and metadata. -type tree struct { - syncHeight uint64 - infoMap map[Node]nodeInfo -} - -func (t tree) getChildren(parent Node) []Node { - var children []Node - for c, info := range t.infoMap { - if info.Parent == parent { - children = append(children, c) - } - } - - slices.SortFunc(children, func(ci, cj uint64) int { - a := t.infoMap[ci] - b := t.infoMap[cj] - return cmp.Compare(a.Meta.Time, b.Meta.Time) - }) - return children -} - -// isAncestor returns true if parent is an ancestor of a child. -// For convenience, also return true if parent == child. -func (t tree) isAncestor(parent, child Node) bool { - for c := child; c != parent; { - p, ok := t.infoMap[c] - if !ok { - return false - } - c = p.Parent - } - return true -} - -// getPathPrefix descends by path constructed from values of attr until -// there is no node corresponding to a path element. Returns the amount of nodes -// processed and ID of the last node. -func (t tree) getPathPrefix(attr string, path []string) (int, Node) { - var curNode Node - -loop: - for i := range path { - children := t.getChildren(curNode) - for j := range children { - meta := t.infoMap[children[j]].Meta - f := meta.GetAttr(attr) - if len(meta.Items) == 1 && string(f) == path[i] { - curNode = children[j] - continue loop - } - } - return i, curNode - } - - return len(path), curNode -} - -// getByPath returns list of nodes which have the specified path from root -// descending by values of attr from meta. -// If latest is true, only the latest node is returned. 
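Note: in the loop below, lastTS is declared but never updated, so the latest branch keeps overwriting nodes; correctness currently rests on getChildren returning children sorted by ascending Meta.Time. If that sorting guarantee ever changed, the check would need to actually track the maximum:

```go
if info.Meta.Time >= lastTS {
	lastTS = info.Meta.Time
	nodes = append(nodes[:0], children[i])
}
```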
-func (t tree) getByPath(attr string, path []string, latest bool) []Node {
-	if len(path) == 0 {
-		return nil
-	}
-
-	i, curNode := t.getPathPrefix(attr, path[:len(path)-1])
-	if i < len(path)-1 {
-		return nil
-	}
-
-	var nodes []Node
-	var lastTS Timestamp
-
-	children := t.getChildren(curNode)
-	for i := range children {
-		info := t.infoMap[children[i]]
-		fileName := string(info.Meta.GetAttr(attr))
-		if fileName == path[len(path)-1] {
-			if latest {
-				if info.Meta.Time >= lastTS {
-					nodes = append(nodes[:0], children[i])
-				}
-			} else {
-				nodes = append(nodes, children[i])
-			}
-		}
-	}
-
-	return nodes
-}
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
deleted file mode 100644
index e1f6cd8e7..000000000
--- a/pkg/local_object_storage/pilorama/interface.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package pilorama
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-// Forest represents a CRDT tree.
-type Forest interface {
-	// TreeMove moves a node in the tree.
-	// If the parent of the move operation is TrashID, the node is removed.
-	// If the child of the move operation is RootID, a new ID is generated and added to the tree.
-	TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error)
-	// TreeAddByPath adds a new node to the tree using the provided path.
-	// The path is constructed by descending from the root using the values of the attr in meta.
-	// Internal nodes in path should have exactly one attribute, otherwise a new node is created.
-	TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
-	// TreeApply applies a replicated operation from another node.
-	// If backgroundSync is true, TreeApply will first check whether the operation already exists.
-	TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
-	// TreeApplyBatch applies replicated operations from another node.
-	TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
-	// TreeGetByPath returns all nodes corresponding to the path.
-	// The path is constructed by descending from the root using the values of the
-	// AttributeFilename in meta.
-	// The last argument determines whether only the node with the latest timestamp is returned.
-	// Should return ErrTreeNotFound if the tree is not found, and empty result if the path is not in the tree.
-	TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
-	// TreeGetMeta returns meta information of the node with the specified ID.
-	// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
-	TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
-	// TreeGetChildren returns children of the node with the specified ID. The order is arbitrary.
-	// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
-	TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
-	// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
-	// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
-	TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
-	// TreeGetOpLog returns the first log operation stored at or above the height.
-	// In case no such operation is found, an empty Move and a nil error should be returned.
-	TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
-	// TreeDrop drops a tree from the database.
-	// If the tree is not found, ErrTreeNotFound should be returned.
-	// In case of an empty treeID, it drops all trees related to the container.
-	TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error
-	// TreeList returns all the tree IDs that have been added to the
-	// passed container ID. A nil slice should be returned if no trees are found.
-	TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error)
-	// TreeExists checks if a tree exists locally.
-	// If the tree is not found, false and a nil error should be returned.
-	TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error)
-	// TreeUpdateLastSyncHeight updates the last log height synchronized with _all_ container nodes.
-	TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error
-	// TreeLastSyncHeight returns the last log height synchronized with _all_ container nodes.
-	TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
-	// TreeHeight returns the current tree height.
-	TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
-}
-
-type ForestStorage interface {
-	// DumpInfo returns information about the pilorama.
-	DumpInfo() Info
-	Init(context.Context) error
-	Open(context.Context, mode.Mode) error
-	Close(context.Context) error
-	SetMode(context.Context, mode.Mode) error
-	SetParentID(id string)
-	Forest
-
-	// TreeListTrees returns all pairs "containerID:treeID".
-	TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error)
-	TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error
-}
-
-const (
-	AttributeFilename = "FileName"
-	AttributeVersion  = "Version"
-)
-
-// Cursor keeps state between function calls for traversing nodes.
-// It stores the attributes associated with a previous call, allowing subsequent operations
-// to resume traversal from this point rather than starting from the beginning.
-type Cursor struct {
-	// Last traversed filename.
-	filename string
-
-	// Last traversed node.
-	node Node
-}
-
-func NewCursor(filename string, node Node) *Cursor {
-	return &Cursor{
-		filename: filename,
-		node:     node,
-	}
-}
-
-func (c *Cursor) GetFilename() string {
-	if c == nil {
-		return ""
-	}
-	return c.filename
-}
-
-func (c *Cursor) GetNode() Node {
-	if c == nil {
-		return Node(0)
-	}
-	return c.node
-}
-
-// CIDDescriptor contains container ID and information about the node position
-// in the list of container nodes.
-type CIDDescriptor struct {
-	CID      cidSDK.ID
-	Position int
-	Size     int
-}
-
-// ErrInvalidCIDDescriptor is returned when info about the node position
-// in the container is invalid.
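Note: Position and Size are not just validation inputs; they seed timestamp generation so that concurrent writers in one container never produce colliding Timestamps. With Size = 4, each Position draws from its own residue class mod 4. A self-contained demonstration (nextTimestamp is copied from util.go further down in this diff; the outputs match its test table):

```go
package main

import "fmt"

func nextTimestamp(ts, pos, size uint64) uint64 {
	base := ts/size*size + pos
	if ts < base {
		return base
	}
	return base + size
}

func main() {
	for pos := uint64(0); pos < 4; pos++ {
		fmt.Println(pos, nextTimestamp(10, pos, 4)) // prints 0 12, 1 13, 2 14, 3 11
	}
}
```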
-var ErrInvalidCIDDescriptor = logicerr.New("cid descriptor is invalid") - -func (d CIDDescriptor) checkValid() bool { - return 0 <= d.Position && d.Position < d.Size -} - -var treeListTreesBatchSizeDefault = 1000 - -type ContainerIDTreeID struct { - CID cidSDK.ID - TreeID string -} - -type TreeListTreesPrm struct { - NextPageToken []byte - // BatchSize is batch size to list trees. If not lower or equals zero, than treeListTreesBatchSizeDefault is used. - BatchSize int -} - -type TreeListTreesResult struct { - NextPageToken []byte - Items []ContainerIDTreeID -} - -type treeList interface { - TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) -} - -func TreeListAll(ctx context.Context, f treeList) ([]ContainerIDTreeID, error) { - return treeListAll(ctx, f, treeListTreesBatchSizeDefault) -} - -func treeListAll(ctx context.Context, f treeList, batchSize int) ([]ContainerIDTreeID, error) { - var prm TreeListTreesPrm - prm.BatchSize = batchSize - var result []ContainerIDTreeID - first := true - - for len(prm.NextPageToken) > 0 || first { - first = false - - res, err := f.TreeListTrees(ctx, prm) - if err != nil { - return nil, err - } - prm.NextPageToken = res.NextPageToken - result = append(result, res.Items...) - } - - return result, nil -} - -func TreeCountAll(ctx context.Context, f treeList) (uint64, error) { - var prm TreeListTreesPrm - var result uint64 - first := true - - for len(prm.NextPageToken) > 0 || first { - first = false - - res, err := f.TreeListTrees(ctx, prm) - if err != nil { - return 0, err - } - prm.NextPageToken = res.NextPageToken - result += uint64(len(res.Items)) - } - - return result, nil -} diff --git a/pkg/local_object_storage/pilorama/meta.go b/pkg/local_object_storage/pilorama/meta.go deleted file mode 100644 index 45e9c2f79..000000000 --- a/pkg/local_object_storage/pilorama/meta.go +++ /dev/null @@ -1,90 +0,0 @@ -package pilorama - -import "github.com/nspcc-dev/neo-go/pkg/io" - -func (x *Meta) FromBytes(data []byte) error { - if len(data) == 0 { - x.Items = nil - x.Time = 0 - return nil - } - - r := io.NewBinReaderFromBuf(data) - x.DecodeBinary(r) - return r.Err -} - -func (x Meta) Bytes() []byte { - w := io.NewBufBinWriter() - x.EncodeBinary(w.BinWriter) - return w.Bytes() -} - -func (x Meta) GetAttr(name string) []byte { - return findAttr(x.Items, name) -} - -func findAttr(ms []KeyValue, name string) []byte { - for _, kv := range ms { - if kv.Key == name { - return kv.Value - } - } - return nil -} - -// DecodeBinary implements the io.Serializable interface. -func (x *Meta) DecodeBinary(r *io.BinReader) { - ts := r.ReadVarUint() - size := r.ReadVarUint() - m := make([]KeyValue, size) - for i := range m { - m[i].Key = r.ReadString() - m[i].Value = r.ReadVarBytes() - } - if r.Err != nil { - return - } - - x.Time = ts - x.Items = m -} - -// EncodeBinary implements the io.Serializable interface. -func (x Meta) EncodeBinary(w *io.BinWriter) { - w.WriteVarUint(x.Time) - w.WriteVarUint(uint64(len(x.Items))) - for _, e := range x.Items { - w.WriteString(e.Key) - w.WriteVarBytes(e.Value) - } -} - -// Size returns size of x in bytes. -func (x Meta) Size() int { - size := getVarIntSize(x.Time) - size += getVarIntSize(uint64(len(x.Items))) - for i := range x.Items { - ln := len(x.Items[i].Key) - size += getVarIntSize(uint64(ln)) + ln - - ln = len(x.Items[i].Value) - size += getVarIntSize(uint64(ln)) + ln - } - return size -} - -// getVarIntSize returns the size in number of bytes of a variable integer. 
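// Note: the switch below never returns 9, whereas neo-go's
// BinWriter.WriteVarUint emits a 9-byte form (0xFF prefix + uint64) for
// values above 0xFFFFFFFF. Should a Timestamp or item length ever cross
// 32 bits, Size() would undercount relative to EncodeBinary; a full-range
// version would end with:
//
//	case value <= 0xFFFFFFFF:
//		return 5 // byte + uint32
//	default:
//		return 9 // byte + uint64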
-// (reference: GetVarSize(int value), https://github.com/neo-project/neo/blob/master/neo/IO/Helper.cs) -func getVarIntSize(value uint64) int { - var size int - - if value < 0xFD { - size = 1 // unit8 - } else if value <= 0xFFFF { - size = 3 // byte + uint16 - } else { - size = 5 // byte + uint32 - } - return size -} diff --git a/pkg/local_object_storage/pilorama/meta_test.go b/pkg/local_object_storage/pilorama/meta_test.go deleted file mode 100644 index f329f6092..000000000 --- a/pkg/local_object_storage/pilorama/meta_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package pilorama - -import ( - "crypto/rand" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMeta_Bytes(t *testing.T) { - t.Run("empty", func(t *testing.T) { - var m Meta - require.NoError(t, m.FromBytes(nil)) - require.True(t, len(m.Items) == 0) - require.Equal(t, uint64(0), m.Time) - require.Equal(t, []byte{0, 0}, m.Bytes()) - }) - t.Run("empty attribute value", func(t *testing.T) { - expected := Meta{ - Time: 123, - Items: []KeyValue{ - {"abc", []byte{1, 2, 3}}, - {AttributeFilename, []byte{}}, - }, - } - - data := expected.Bytes() - - var actual Meta - require.NoError(t, actual.FromBytes(data)) - require.Equal(t, expected, actual) - }) - t.Run("filled", func(t *testing.T) { - expected := Meta{ - Time: 123, - Items: []KeyValue{ - {"abc", []byte{1, 2, 3}}, - {"xyz", []byte{5, 6, 7, 8}}, - }, - } - - data := expected.Bytes() - - var actual Meta - require.NoError(t, actual.FromBytes(data)) - require.Equal(t, expected, actual) - - t.Run("error", func(t *testing.T) { - require.Error(t, new(Meta).FromBytes(data[:len(data)/2])) - }) - }) -} - -func TestMeta_GetAttr(t *testing.T) { - attr := [][]byte{ - make([]byte, 5), - make([]byte, 10), - } - for i := range attr { - rand.Read(attr[i]) - } - - m := Meta{Items: []KeyValue{{"abc", attr[0]}, {"xyz", attr[1]}}} - require.Equal(t, attr[0], m.GetAttr("abc")) - require.Equal(t, attr[1], m.GetAttr("xyz")) - - require.Nil(t, m.GetAttr("a")) - require.Nil(t, m.GetAttr("xyza")) - require.Nil(t, m.GetAttr("")) -} diff --git a/pkg/local_object_storage/pilorama/metrics.go b/pkg/local_object_storage/pilorama/metrics.go deleted file mode 100644 index 6ffc479e4..000000000 --- a/pkg/local_object_storage/pilorama/metrics.go +++ /dev/null @@ -1,23 +0,0 @@ -package pilorama - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" -) - -type Metrics interface { - SetParentID(id string) - - SetMode(m mode.ComponentMode) - Close() - - AddMethodDuration(method string, d time.Duration, success bool) -} - -type noopMetrics struct{} - -func (m *noopMetrics) SetParentID(string) {} -func (m *noopMetrics) SetMode(mode.ComponentMode) {} -func (m *noopMetrics) Close() {} -func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {} diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go deleted file mode 100644 index 0c042aa56..000000000 --- a/pkg/local_object_storage/pilorama/mode_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package pilorama - -import ( - "context" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" -) - -func Test_Mode(t *testing.T) { - t.Parallel() - f := NewBoltForest( - []Option{ - WithPath(filepath.Join(t.TempDir(), "test.db")), - WithMaxBatchSize(1), - }...) 
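// Note: both degraded modes bypass the on-disk forest entirely, so the
// assertions below expect the BoltDB handle to remain nil through both
// Open and Init, presumably mirroring the mode.NoMetabase() checks the
// shard performs before touching the pilorama.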
- - require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly)) - require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init(context.Background())) - require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close(context.Background())) - - require.NoError(t, f.Open(context.Background(), mode.Degraded)) - require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init(context.Background())) - require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close(context.Background())) -} diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go deleted file mode 100644 index 36d347f10..000000000 --- a/pkg/local_object_storage/pilorama/multinode.go +++ /dev/null @@ -1,53 +0,0 @@ -package pilorama - -import "bytes" - -// MultiNode represents a group of internal nodes accessible by the same path, but having different id. -type MultiNode []Node - -// MultiNodeInfo represents a group of internal nodes accessible by the same path, but having different id. -type MultiNodeInfo struct { - Children MultiNode - Parents MultiNode - Timestamps []uint64 - Meta []KeyValue -} - -func (r *MultiNodeInfo) Add(info NodeInfo) bool { - if !isInternal(info.Meta.Items) || !isInternal(r.Meta) || - !bytes.Equal(r.Meta[0].Value, info.Meta.Items[0].Value) { - return false - } - - r.Children = append(r.Children, info.ID) - r.Parents = append(r.Parents, info.ParentID) - r.Timestamps = append(r.Timestamps, info.Meta.Time) - return true -} - -func (r *MultiNodeInfo) LastChild() Node { - return r.Children[len(r.Children)-1] -} - -func (n NodeInfo) ToMultiNode() MultiNodeInfo { - return MultiNodeInfo{ - Children: MultiNode{n.ID}, - Parents: MultiNode{n.ParentID}, - Timestamps: []uint64{n.Meta.Time}, - Meta: n.Meta.Items, - } -} - -func isInternal(m []KeyValue) bool { - return len(m) == 1 && m[0].Key == AttributeFilename -} - -func mergeNodeInfos(ns []NodeInfo) []MultiNodeInfo { - var r []MultiNodeInfo - for _, info := range ns { - if len(r) == 0 || !r[len(r)-1].Add(info) { - r = append(r, info.ToMultiNode()) - } - } - return r -} diff --git a/pkg/local_object_storage/pilorama/option.go b/pkg/local_object_storage/pilorama/option.go deleted file mode 100644 index d576d427f..000000000 --- a/pkg/local_object_storage/pilorama/option.go +++ /dev/null @@ -1,61 +0,0 @@ -package pilorama - -import ( - "io/fs" - "os" - "time" -) - -type Option func(*cfg) - -type cfg struct { - path string - perm fs.FileMode - noSync bool - maxBatchDelay time.Duration - maxBatchSize int - openFile func(string, int, fs.FileMode) (*os.File, error) - metrics Metrics -} - -func WithPath(path string) Option { - return func(c *cfg) { - c.path = path - } -} - -func WithPerm(perm fs.FileMode) Option { - return func(c *cfg) { - c.perm = perm - } -} - -func WithNoSync(noSync bool) Option { - return func(c *cfg) { - c.noSync = noSync - } -} - -func WithMaxBatchDelay(d time.Duration) Option { - return func(c *cfg) { - c.maxBatchDelay = d - } -} - -func WithMaxBatchSize(size int) Option { - return func(c *cfg) { - c.maxBatchSize = size - } -} - -func WithOpenFile(openFile func(string, int, fs.FileMode) (*os.File, error)) Option { - return func(c *cfg) { - c.openFile = openFile - } -} - -func WithMetrics(m Metrics) Option { - return func(c *cfg) { - c.metrics = m - } -} diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go deleted file mode 100644 index eecee1527..000000000 --- a/pkg/local_object_storage/pilorama/split_test.go +++ /dev/null @@ -1,155 
+0,0 @@ -package pilorama - -import ( - "context" - "strings" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func TestDuplicateDirectory(t *testing.T) { - for i := range providers { - if providers[i].name == "inmemory" { - continue - } - t.Run(providers[i].name, func(t *testing.T) { - testDuplicateDirectory(t, providers[i].construct(t)) - }) - } -} - -func testDuplicateDirectory(t *testing.T, f Forest) { - ctx := context.Background() - d := CIDDescriptor{CID: cidtest.ID(), Size: 1} - treeID := "sometree" - - treeApply := func(t *testing.T, parent, child uint64, filename string, internal bool) { - // Nothing magic here, we add items in order and children are unique. - // This simplifies function interface a bit. - ts := child - - kv := []KeyValue{{Key: AttributeFilename, Value: []byte(filename)}} - if !internal { - kv = append(kv, KeyValue{Key: "uniqueAttr", Value: []byte{byte(child)}}) - } - - err := f.TreeApply(ctx, d.CID, treeID, &Move{ - Parent: parent, - Child: child, - Meta: Meta{ - Time: ts, - Items: kv, - }, - }, true) - require.NoError(t, err) - } - - // The following tree is constructed: - // 0 - // [1] |-- dir1 (internal) - // [2] |-- value1 - // [3] |-- dir3 (internal) - // [4] |-- value3 - // [5] |-- dir1 (internal) - // [6] |-- value2 - // [7] |-- dir3 (internal) - // [8] |-- value4 - // [9] |-- dir2 (internal) - // [10] |-- value0 - treeApply(t, RootID, 1, "dir1", true) - treeApply(t, 1, 2, "value1", false) - treeApply(t, 1, 3, "dir3", true) - treeApply(t, 3, 4, "value3", false) - treeApply(t, RootID, 5, "dir1", true) - treeApply(t, 5, 6, "value2", false) - treeApply(t, 5, 7, "dir3", true) - treeApply(t, 7, 8, "value4", false) - treeApply(t, RootID, 9, "dir2", true) - treeApply(t, RootID, 10, "value0", false) - - // The compacted view: - // 0 - // [1,5] |-- dir1 (internal) - // [2] |-- value1 - // [3,7] |-- dir3 (internal) - // [4] |-- value3 - // [8] |-- value4 - // [6] |-- value2 - // [9] |-- dir2 (internal) - // [10] |-- value0 - testGetByPath := func(t *testing.T, p string) []byte { - pp := strings.Split(p, "/") - nodes, err := f.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, pp, false) - require.NoError(t, err) - require.Equal(t, 1, len(nodes)) - - meta, _, err := f.TreeGetMeta(ctx, d.CID, treeID, nodes[0]) - require.NoError(t, err) - require.Equal(t, []byte(pp[len(pp)-1]), meta.GetAttr(AttributeFilename)) - return meta.GetAttr("uniqueAttr") - } - - require.Equal(t, []byte{2}, testGetByPath(t, "dir1/value1")) - require.Equal(t, []byte{4}, testGetByPath(t, "dir1/dir3/value3")) - require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) - require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - - testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { - res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) - require.NoError(t, err) - return res, last - } - - t.Run("test sorted listing, full children branch", func(t *testing.T) { - t.Run("big batch size", func(t *testing.T) { - res, _ := testSortedByFilename(t, MultiNode{RootID}, nil, 10) - require.Equal(t, 3, len(res)) - require.Equal(t, MultiNode{1, 5}, res[0].Children) - require.Equal(t, MultiNode{9}, res[1].Children) - require.Equal(t, MultiNode{10}, res[2].Children) - - t.Run("multi-root", func(t *testing.T) { - res, _ := testSortedByFilename(t, MultiNode{1, 5}, nil, 10) - require.Equal(t, 3, 
len(res)) - require.Equal(t, MultiNode{3, 7}, res[0].Children) - require.Equal(t, MultiNode{2}, res[1].Children) - require.Equal(t, MultiNode{6}, res[2].Children) - }) - }) - t.Run("small batch size", func(t *testing.T) { - res, last := testSortedByFilename(t, MultiNode{RootID}, nil, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{1, 5}, res[0].Children) - - res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{9}, res[0].Children) - - res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{10}, res[0].Children) - - res, _ = testSortedByFilename(t, MultiNode{RootID}, last, 1) - require.Equal(t, 0, len(res)) - - t.Run("multi-root", func(t *testing.T) { - res, last := testSortedByFilename(t, MultiNode{1, 5}, nil, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{3, 7}, res[0].Children) - - res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{2}, res[0].Children) - - res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1) - require.Equal(t, 1, len(res)) - require.Equal(t, MultiNode{6}, res[0].Children) - - res, _ = testSortedByFilename(t, MultiNode{RootID}, last, 1) - require.Equal(t, 0, len(res)) - }) - }) - }) -} diff --git a/pkg/local_object_storage/pilorama/types.go b/pkg/local_object_storage/pilorama/types.go deleted file mode 100644 index 8d8616364..000000000 --- a/pkg/local_object_storage/pilorama/types.go +++ /dev/null @@ -1,63 +0,0 @@ -package pilorama - -import ( - "math" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" -) - -// Timestamp is an alias for integer timestamp type. -// TODO: remove after the debugging. -type Timestamp = uint64 - -// Node is used to represent nodes. -// TODO: remove after the debugging. -type Node = uint64 - -// Meta represents arbitrary meta information. -// TODO: remove after the debugging or create a proper interface. -type Meta struct { - Time Timestamp - Items []KeyValue -} - -// KeyValue represents a key-value pair. -type KeyValue struct { - Key string - Value []byte -} - -// Move represents a single move operation. -type Move struct { - Parent Node - Meta - // Child represents the ID of a node being moved. If zero, new ID is generated. - Child Node -} - -const ( - // RootID represents the ID of a root node. - RootID = 0 - // TrashID is a parent for all removed nodes. - TrashID = math.MaxUint64 -) - -var ( - // ErrTreeNotFound is returned when the requested tree is not found. - ErrTreeNotFound = logicerr.New("tree not found") - // ErrNotPathAttribute is returned when the path is trying to be constructed with a non-internal - // attribute. Currently the only attribute allowed is AttributeFilename. - ErrNotPathAttribute = logicerr.New("attribute can't be used in path construction") -) - -// isAttributeInternal returns true iff key can be used in `*ByPath` methods. -// For such attributes an additional index is maintained in the database. 
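Note: the two sentinel IDs above combine with Move to express every tree mutation: a removal is just a move under TrashID, and passing RootID as the child of TreeMove asks the backend to allocate a fresh node ID. Illustrative fragment (nodeID, ts and attrs are placeholders):

```go
// Remove an existing node: re-parent it under the trash sentinel.
removeOp := Move{Parent: TrashID, Child: nodeID, Meta: Meta{Time: ts}}

// Create a new node under the root: Child == RootID triggers ID allocation.
createOp := Move{Parent: RootID, Child: RootID, Meta: Meta{Items: attrs}}
```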
-func isAttributeInternal(key string) bool { - return key == AttributeFilename -} - -type NodeInfo struct { - ID Node - Meta Meta - ParentID Node -} diff --git a/pkg/local_object_storage/pilorama/util.go b/pkg/local_object_storage/pilorama/util.go deleted file mode 100644 index 53b7e1d50..000000000 --- a/pkg/local_object_storage/pilorama/util.go +++ /dev/null @@ -1,11 +0,0 @@ -package pilorama - -// nextTimestamp accepts the latest local timestamp, node position in a container and container size. -// Returns the next timestamp which can be generated by this node. -func nextTimestamp(ts Timestamp, pos, size uint64) Timestamp { - base := ts/size*size + pos - if ts < base { - return base - } - return base + size -} diff --git a/pkg/local_object_storage/pilorama/util_test.go b/pkg/local_object_storage/pilorama/util_test.go deleted file mode 100644 index bfa141c70..000000000 --- a/pkg/local_object_storage/pilorama/util_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package pilorama - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNextTimestamp(t *testing.T) { - testCases := []struct { - latest Timestamp - pos, size uint64 - expected Timestamp - }{ - {0, 0, 1, 1}, - {2, 0, 1, 3}, - {0, 0, 2, 2}, - {0, 1, 2, 1}, - {10, 0, 4, 12}, - {11, 0, 4, 12}, - {12, 0, 4, 16}, - {10, 1, 4, 13}, - {11, 1, 4, 13}, - {12, 1, 4, 13}, - {10, 2, 4, 14}, - {11, 2, 4, 14}, - {12, 2, 4, 14}, - {10, 3, 4, 11}, - {11, 3, 4, 15}, - {12, 3, 4, 15}, - } - - for _, tc := range testCases { - actual := nextTimestamp(tc.latest, tc.pos, tc.size) - require.Equal(t, tc.expected, actual, - "latest %d, pos %d, size %d", tc.latest, tc.pos, tc.size) - } -} diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go deleted file mode 100644 index b4015ae8d..000000000 --- a/pkg/local_object_storage/shard/container.go +++ /dev/null @@ -1,150 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type ContainerSizePrm struct { - cnr cid.ID -} - -type ContainerSizeRes struct { - size uint64 -} - -func (p *ContainerSizePrm) SetContainerID(cnr cid.ID) { - p.cnr = cnr -} - -func (r ContainerSizeRes) Size() uint64 { - return r.size -} - -func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ContainerSizeRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ContainerSizeRes{}, err - } - defer release() - - size, err := s.metaBase.ContainerSize(prm.cnr) - if err != nil { - return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) - } - - return ContainerSizeRes{ - size: size, - }, nil -} - -type ContainerCountPrm struct { - ContainerID cid.ID -} - -type ContainerCountRes struct { - Phy uint64 - Logic uint64 - User uint64 -} - -func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (ContainerCountRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ContainerCount", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Stringer("container_id", prm.ContainerID), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ContainerCountRes{}, ErrDegradedMode - } - - release, err 
:= s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ContainerCountRes{}, err - } - defer release() - - counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) - if err != nil { - return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) - } - - return ContainerCountRes{ - Phy: counters.Phy, - Logic: counters.Logic, - User: counters.User, - }, nil -} - -func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerSize", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Stringer("container_id", id), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - - return s.metaBase.DeleteContainerSize(ctx, id) -} - -func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerCount", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Stringer("container_id", id), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - - return s.metaBase.DeleteContainerCount(ctx, id) -} diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go deleted file mode 100644 index a607f70f7..000000000 --- a/pkg/local_object_storage/shard/control.go +++ /dev/null @@ -1,484 +0,0 @@ -package shard - -import ( - "context" - "errors" - "fmt" - "slices" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error { - s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode, - zap.String("stage", stage), - zap.Stringer("mode", mode.ReadOnly), - zap.Error(err)) - - err = s.SetMode(ctx, mode.ReadOnly) - if err == nil { - return nil - } - - s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode, - zap.String("stage", stage), - zap.Stringer("mode", mode.DegradedReadOnly), - zap.Error(err)) - - err = s.SetMode(ctx, mode.DegradedReadOnly) - if err != nil { - return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly) - } - return nil -} - -// Open opens all Shard's components. 
-func (s *Shard) Open(ctx context.Context) error { - components := []interface { - Open(context.Context, mode.Mode) error - }{ - s.blobStor, - } - m := s.GetMode() - - if !m.NoMetabase() { - components = append(components, s.metaBase) - } - - if s.hasWriteCache() && !m.NoMetabase() { - components = append(components, s.writeCache) - } - - if s.pilorama != nil { - components = append(components, s.pilorama) - } - - for i, component := range components { - if err := component.Open(ctx, m); err != nil { - if component == s.metaBase { - // We must first open all other components to avoid - // opening non-existent DB in read-only mode. - for j := i + 1; j < len(components); j++ { - if err := components[j].Open(ctx, m); err != nil { - // Other components must be opened, fail. - return fmt.Errorf("open %T: %w", components[j], err) - } - } - err = s.handleMetabaseFailure(ctx, "open", err) - if err != nil { - return err - } - - break - } - - return fmt.Errorf("open %T: %w", component, err) - } - } - return nil -} - -type metabaseSynchronizer Shard - -func (x *metabaseSynchronizer) Init(ctx context.Context) error { - ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init") - defer span.End() - - return (*Shard)(x).refillMetabase(ctx) -} - -// Init initializes all Shard's components. -func (s *Shard) Init(ctx context.Context) error { - m := s.GetMode() - if err := s.initializeComponents(ctx, m); err != nil { - return err - } - - s.updateMetrics(ctx) - - s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - newEpochChan: make(chan uint64), - newEpochHandlers: &newEpochHandlers{ - cancelFunc: func() {}, - handlers: []newEpochHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, - }, - }, - } - if s.gc.metrics != nil { - s.gc.metrics.SetShardID(s.info.ID.String()) - } - - s.gc.init(ctx) - - s.rb = newRebuilder() - if !m.NoMetabase() { - s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) - } - s.writecacheSealCancel.Store(dummyCancel) - return nil -} - -func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { - type initializer interface { - Init(context.Context) error - } - - var components []initializer - - if !m.NoMetabase() { - var initMetabase initializer - - if s.NeedRefillMetabase() { - initMetabase = (*metabaseSynchronizer)(s) - } else { - initMetabase = s.metaBase - } - - components = []initializer{ - s.blobStor, initMetabase, - } - } else { - components = []initializer{s.blobStor} - } - - if s.hasWriteCache() && !m.NoMetabase() { - components = append(components, s.writeCache) - } - - if s.pilorama != nil { - components = append(components, s.pilorama) - } - - for _, component := range components { - if err := component.Init(ctx); err != nil { - if component == s.metaBase { - if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) { - return fmt.Errorf("metabase initialization: %w", err) - } - - err = s.handleMetabaseFailure(ctx, "init", err) - if err != nil { - return err - } - - break - } - - return fmt.Errorf("initialize %T: %w", component, err) - } - } - return nil -} - -func (s *Shard) refillMetabase(ctx context.Context) error { - path := s.metaBase.DumpInfo().Path - s.metricsWriter.SetRefillStatus(path, "running") - s.metricsWriter.SetRefillPercent(path, 0) - var success bool - defer func() { - if success { - s.metricsWriter.SetRefillStatus(path, "completed") - } else { - s.metricsWriter.SetRefillStatus(path, 
"failed") - } - }() - - err := s.metaBase.Reset() - if err != nil { - return fmt.Errorf("reset metabase: %w", err) - } - - withCount := true - totalObjects, err := s.blobStor.ObjectsCount(ctx) - if err != nil { - s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err)) - withCount = false - } - - eg, egCtx := errgroup.WithContext(ctx) - if s.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.refillMetabaseWorkersCount) - } - - var completedCount uint64 - var metricGuard sync.Mutex - itErr := blobstor.IterateBinaryObjects(egCtx, s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error { - eg.Go(func() error { - var success bool - defer func() { - s.metricsWriter.IncRefillObjectsCount(path, len(data), success) - if withCount { - metricGuard.Lock() - completedCount++ - s.metricsWriter.SetRefillPercent(path, uint32(completedCount*100/totalObjects)) - metricGuard.Unlock() - } - }() - - if err := s.refillObject(egCtx, data, addr, descriptor); err != nil { - return err - } - success = true - return nil - }) - - select { - case <-egCtx.Done(): - return egCtx.Err() - default: - return nil - } - }) - - egErr := eg.Wait() - - err = errors.Join(egErr, itErr) - if err != nil { - return fmt.Errorf("put objects to the meta: %w", err) - } - - err = s.metaBase.SyncCounters() - if err != nil { - return fmt.Errorf("sync object counters: %w", err) - } - - success = true - s.metricsWriter.SetRefillPercent(path, 100) - return nil -} - -func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error { - obj := objectSDK.New() - if err := obj.Unmarshal(data); err != nil { - s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject, - zap.Stringer("address", addr), - zap.Error(err)) - return nil - } - - hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0 - - var isIndexedContainer bool - if hasIndexedAttribute { - info, err := s.containerInfo.Info(ctx, addr.Container()) - if err != nil { - return err - } - if info.Removed { - s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr)) - return nil - } - isIndexedContainer = info.Indexed - } - - var err error - switch obj.Type() { - case objectSDK.TypeTombstone: - err = s.refillTombstoneObject(ctx, obj) - case objectSDK.TypeLock: - err = s.refillLockObject(ctx, obj) - default: - } - if err != nil { - return err - } - - var mPrm meta.PutPrm - mPrm.SetObject(obj) - mPrm.SetStorageID(descriptor) - mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer) - - _, err = s.metaBase.Put(ctx, mPrm) - if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) { - return err - } - return nil -} - -func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error { - var lock objectSDK.Lock - if err := lock.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("unmarshal lock content: %w", err) - } - - locked := make([]oid.ID, lock.NumberOfMembers()) - lock.ReadMembers(locked) - - cnr, _ := obj.ContainerID() - id, _ := obj.ID() - err := s.metaBase.Lock(ctx, cnr, id, locked) - if err != nil { - return fmt.Errorf("lock objects: %w", err) - } - return nil -} - -func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error { - tombstone := objectSDK.NewTombstone() - - if err := tombstone.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("unmarshal tombstone content: %w", err) - } - - 
tombAddr := object.AddressOf(obj) - memberIDs := tombstone.Members() - tombMembers := make([]oid.Address, 0, len(memberIDs)) - - for i := range memberIDs { - a := tombAddr - a.SetObject(memberIDs[i]) - - tombMembers = append(tombMembers, a) - } - - var inhumePrm meta.InhumePrm - - inhumePrm.SetTombstoneAddress(tombAddr) - inhumePrm.SetAddresses(tombMembers...) - - _, err := s.metaBase.Inhume(ctx, inhumePrm) - if err != nil { - return fmt.Errorf("inhume objects: %w", err) - } - return nil -} - -// Close releases all Shard's components. -func (s *Shard) Close(ctx context.Context) error { - unlock := s.lockExclusive() - if s.rb != nil { - s.rb.Stop(ctx, s.log) - } - var components []interface{ Close(context.Context) error } - - if s.pilorama != nil { - components = append(components, s.pilorama) - } - - if s.hasWriteCache() { - prev := s.writecacheSealCancel.Swap(notInitializedCancel) - prev.cancel() // no need to wait: writecache.Seal and writecache.Close lock the same mutex - components = append(components, s.writeCache) - } - - components = append(components, s.blobStor, s.metaBase) - - var lastErr error - for _, component := range components { - if err := component.Close(ctx); err != nil { - lastErr = err - s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err)) - } - } - - if s.opsLimiter != nil { - s.opsLimiter.Close() - } - - unlock() - - // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock. - // So to prevent deadlock GC stopping is outside of exclusive lock. - // If Init/Open was unsuccessful gc can be nil. - if s.gc != nil { - s.gc.stop(ctx) - } - - return lastErr -} - -// Reload reloads configuration portions that are necessary. -// If a config option is invalid, it logs an error and returns nil. -// If there was a problem with applying new configuration, an error is returned. -func (s *Shard) Reload(ctx context.Context, opts ...Option) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload") - defer span.End() - - // Do not use defaultCfg here missing options need not be reloaded. - var c cfg - for i := range opts { - opts[i](&c) - } - - unlock := s.lockExclusive() - defer unlock() - - s.rb.Stop(ctx, s.log) - if !s.info.Mode.NoMetabase() { - defer func() { - s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) - }() - } - - ok, err := s.metaBase.Reload(ctx, c.metaOpts...) - if err != nil { - if errors.Is(err, meta.ErrDegradedMode) { - s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) - _ = s.setMode(ctx, mode.DegradedReadOnly) - } - return err - } - if ok { - var err error - if c.refillMetabase { - // Here we refill metabase only if a new instance was opened. This is a feature, - // we don't want to hang for some time just because we forgot to change - // config after the node was updated. 
- err = s.refillMetabase(ctx) - } else { - err = s.metaBase.Init(ctx) - } - if err != nil { - s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) - _ = s.setMode(ctx, mode.DegradedReadOnly) - return err - } - } - if err := s.setMode(ctx, c.info.Mode); err != nil { - return err - } - s.reloadOpsLimiter(&c) - - return nil -} - -func (s *Shard) reloadOpsLimiter(c *cfg) { - if c.configOpsLimiter != nil { - old := s.opsLimiter.ptr.Swap(&qosLimiterHolder{Limiter: c.configOpsLimiter}) - old.Close() - s.opsLimiter.SetParentID(s.info.ID.String()) - } -} - -func (s *Shard) lockExclusive() func() { - s.setModeRequested.Store(true) - val := s.gcCancel.Load() - if val != nil { - cancelGC := val.(context.CancelFunc) - cancelGC() - } - if c := s.writecacheSealCancel.Load(); c != nil { - c.cancel() - } - s.m.Lock() - s.setModeRequested.Store(false) - return s.m.Unlock -} diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go deleted file mode 100644 index 6d2cd7137..000000000 --- a/pkg/local_object_storage/shard/control_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package shard - -import ( - "context" - "fmt" - "io/fs" - "math" - "os" - "path/filepath" - "sync/atomic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -type objAddr struct { - obj *objectSDK.Object - addr oid.Address -} - -func TestShardOpen(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - metaPath := filepath.Join(dir, "meta") - - st := teststore.New(teststore.WithSubstorage(fstree.New( - fstree.WithDirNameLen(2), - fstree.WithPath(filepath.Join(dir, "blob")), - fstree.WithDepth(1)), - )) - - var allowedMode atomic.Int64 - openFileMetabase := func(p string, f int, perm fs.FileMode) (*os.File, error) { - const modeMask = os.O_RDONLY | os.O_RDWR | os.O_WRONLY - if int64(f&modeMask) == allowedMode.Load() { - return os.OpenFile(p, f, perm) - } - return nil, fs.ErrPermission - } - - wcOpts := []writecache.Option{ - writecache.WithPath(filepath.Join(dir, "wc")), - } - - newShard := func() *Shard { - return New( - WithID(NewIDFromBytes([]byte{})), - WithLogger(test.NewLogger(t)), - WithBlobStorOptions( - blobstor.WithStorages([]blobstor.SubStorage{ - {Storage: st}, - })), - 
WithMetaBaseOptions( - meta.WithPath(metaPath), - meta.WithEpochState(epochState{}), - meta.WithBoltDBOptions(&bbolt.Options{OpenFile: openFileMetabase}), - ), - WithPiloramaOptions( - pilorama.WithPath(filepath.Join(dir, "pilorama"))), - WithWriteCache(true), - WithWriteCacheOptions(wcOpts)) - } - - allowedMode.Store(int64(os.O_RDWR)) - - sh := newShard() - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - require.Equal(t, mode.ReadWrite, sh.GetMode()) - require.NoError(t, sh.Close(context.Background())) - - // Metabase can be opened in read-only => start in ReadOnly mode. - allowedMode.Store(int64(os.O_RDONLY)) - - sh = newShard() - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite)) - require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.NoError(t, sh.Close(context.Background())) - - // Metabase is corrupted => start in DegradedReadOnly mode. - allowedMode.Store(math.MaxInt64) - - sh = newShard() - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - require.Equal(t, mode.DegradedReadOnly, sh.GetMode()) - require.NoError(t, sh.Close(context.Background())) -} - -func TestRefillMetabaseCorrupted(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - - fsTree := fstree.New( - fstree.WithDirNameLen(2), - fstree.WithPath(filepath.Join(dir, "blob")), - fstree.WithDepth(1)) - blobOpts := []blobstor.Option{ - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: fsTree, - }, - }), - } - - mm := newMetricStore() - - sh := New( - WithID(NewIDFromBytes([]byte{})), - WithBlobStorOptions(blobOpts...), - WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))), - WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})), - WithMetricsWriter(mm), - ) - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - - obj := objecttest.Object() - obj.SetType(objectSDK.TypeRegular) - obj.SetPayload([]byte{0, 1, 2, 3, 4, 5}) - - var putPrm PutPrm - putPrm.SetObject(obj) - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - require.NoError(t, sh.Close(context.Background())) - - addr := object.AddressOf(obj) - // This is copied from `fstree.treePath()` to avoid exporting function just for tests. - { - saddr := addr.Object().EncodeToString() + "." 
+ addr.Container().EncodeToString() - p := fmt.Sprintf("%s/%s/%s", fsTree.RootPath, saddr[:2], saddr[2:]) - require.NoError(t, os.WriteFile(p, []byte("not an object"), fsTree.Permissions)) - } - - sh = New( - WithID(NewIDFromBytes([]byte{})), - WithBlobStorOptions(blobOpts...), - WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))), - WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta_new")), meta.WithEpochState(epochState{})), - WithRefillMetabase(true), - WithMetricsWriter(mm)) - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - - var getPrm GetPrm - getPrm.SetAddress(addr) - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, sh.Close(context.Background())) -} - -func TestRefillMetabase(t *testing.T) { - t.Parallel() - - p := t.Name() - - defer os.RemoveAll(p) - - blobOpts := []blobstor.Option{ - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(p, "blob")), - fstree.WithDepth(1)), - }, - }), - } - - mm := newMetricStore() - - sh := New( - WithID(NewIDFromBytes([]byte{})), - WithBlobStorOptions(blobOpts...), - WithMetaBaseOptions( - meta.WithPath(filepath.Join(p, "meta")), - meta.WithEpochState(epochState{}), - ), - WithPiloramaOptions( - pilorama.WithPath(filepath.Join(p, "pilorama"))), - WithMetricsWriter(mm), - ) - - // open Blobstor - require.NoError(t, sh.Open(context.Background())) - - // initialize Blobstor - require.NoError(t, sh.Init(context.Background())) - - const objNum = 5 - - mObjs := make(map[string]objAddr) - locked := make([]oid.ID, 1, 2) - locked[0] = oidtest.ID() - cnrLocked := cidtest.ID() - for range objNum { - obj := objecttest.Object() - obj.SetType(objectSDK.TypeRegular) - - if len(locked) < 2 { - obj.SetContainerID(cnrLocked) - id, _ := obj.ID() - locked = append(locked, id) - } - - addr := object.AddressOf(obj) - - mObjs[addr.EncodeToString()] = objAddr{ - obj: obj, - addr: addr, - } - } - - tombObj := objecttest.Object() - tombObj.SetType(objectSDK.TypeTombstone) - - tombstone := objecttest.Tombstone() - - tombData, err := tombstone.Marshal() - require.NoError(t, err) - - tombObj.SetPayload(tombData) - - tombMembers := make([]oid.Address, 0, len(tombstone.Members())) - - members := tombstone.Members() - for i := range tombstone.Members() { - var a oid.Address - a.SetObject(members[i]) - cnr, _ := tombObj.ContainerID() - a.SetContainer(cnr) - - tombMembers = append(tombMembers, a) - } - - var putPrm PutPrm - - for _, v := range mObjs { - putPrm.SetObject(v.obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - } - - putPrm.SetObject(tombObj) - - _, err = sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - // LOCK object handling - var lock objectSDK.Lock - lock.WriteMembers(locked) - - lockObj := objecttest.Object() - lockObj.SetContainerID(cnrLocked) - objectSDK.WriteLock(lockObj, lock) - - putPrm.SetObject(lockObj) - _, err = sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - lockID, _ := lockObj.ID() - require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked)) - - var inhumePrm InhumePrm - inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...) 
- - _, err = sh.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - var headPrm HeadPrm - - checkObj := func(addr oid.Address, expObj *objectSDK.Object) { - headPrm.SetAddress(addr) - - res, err := sh.Head(context.Background(), headPrm) - - if expObj == nil { - require.True(t, client.IsErrObjectNotFound(err)) - return - } - - require.NoError(t, err) - require.Equal(t, expObj.CutPayload(), res.Object()) - } - - checkAllObjs := func(exists bool) { - for _, v := range mObjs { - if exists { - checkObj(v.addr, v.obj) - } else { - checkObj(v.addr, nil) - } - } - } - - checkTombMembers := func(exists bool) { - for _, member := range tombMembers { - headPrm.SetAddress(member) - - _, err := sh.Head(context.Background(), headPrm) - - if exists { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } - } - } - - checkLocked := func(t *testing.T, cnr cid.ID, locked []oid.ID) { - var addr oid.Address - addr.SetContainer(cnr) - - for i := range locked { - addr.SetObject(locked[i]) - - var prm InhumePrm - prm.MarkAsGarbage(addr) - - var target *apistatus.ObjectLocked - _, err := sh.Inhume(context.Background(), prm) - require.ErrorAs(t, err, &target, "object %s should be locked", locked[i]) - } - } - - checkAllObjs(true) - checkObj(object.AddressOf(tombObj), tombObj) - checkTombMembers(true) - checkLocked(t, cnrLocked, locked) - - c, err := sh.metaBase.ObjectCounters() - require.NoError(t, err) - - phyBefore := c.Phy - logicalBefore := c.Logic - - err = sh.Close(context.Background()) - require.NoError(t, err) - - sh = New( - WithID(NewIDFromBytes([]byte{})), - WithBlobStorOptions(blobOpts...), - WithMetaBaseOptions( - meta.WithPath(filepath.Join(p, "meta_restored")), - meta.WithEpochState(epochState{}), - ), - WithPiloramaOptions( - pilorama.WithPath(filepath.Join(p, "pilorama_another"))), - WithMetricsWriter(mm), - ) - - // open Blobstor - require.NoError(t, sh.Open(context.Background())) - - // initialize Blobstor - require.NoError(t, sh.Init(context.Background())) - - defer sh.Close(context.Background()) - - checkAllObjs(false) - checkObj(object.AddressOf(tombObj), nil) - checkTombMembers(false) - - err = sh.refillMetabase(context.Background()) - require.NoError(t, err) - - c, err = sh.metaBase.ObjectCounters() - require.NoError(t, err) - - require.Equal(t, phyBefore, c.Phy) - require.Equal(t, logicalBefore, c.Logic) - - checkAllObjs(true) - checkObj(object.AddressOf(tombObj), tombObj) - checkTombMembers(true) - checkLocked(t, cnrLocked, locked) - require.Equal(t, int64(len(mObjs)+2), mm.refillCount) // 1 lock + 1 tomb - require.Equal(t, "completed", mm.refillStatus) - require.Equal(t, uint32(100), mm.refillPercent) -} diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go deleted file mode 100644 index 8dc1f0522..000000000 --- a/pkg/local_object_storage/shard/count.go +++ /dev/null @@ -1,37 +0,0 @@ -package shard - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// LogicalObjectsCount returns logical objects count. 
-func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { - _, span := tracing.StartSpanFromContext(ctx, "Shard.LogicalObjectsCount", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.GetMode().NoMetabase() { - return 0, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - - cc, err := s.metaBase.ObjectCounters() - if err != nil { - return 0, err - } - return cc.Logic, nil -} diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go deleted file mode 100644 index 0101817a8..000000000 --- a/pkg/local_object_storage/shard/delete.go +++ /dev/null @@ -1,161 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// DeletePrm groups the parameters of Delete operation. -type DeletePrm struct { - addr []oid.Address -} - -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct { - deleted uint64 -} - -// SetAddresses is a Delete option to set the addresses of the objects to delete. -// -// Option is required. -func (p *DeletePrm) SetAddresses(addr ...oid.Address) { - p.addr = append(p.addr, addr...) -} - -// Delete removes data from the shard's metaBase and// blobStor. 
-func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Int("addr_count", len(prm.addr)), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - return s.delete(ctx, prm, false) -} - -func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (DeleteRes, error) { - if s.info.Mode.ReadOnly() { - return DeleteRes{}, ErrReadOnlyMode - } else if s.info.Mode.NoMetabase() { - return DeleteRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return DeleteRes{}, err - } - defer release() - - result := DeleteRes{} - for _, addr := range prm.addr { - select { - case <-ctx.Done(): - return result, ctx.Err() - default: - } - - if err := s.validateWritecacheDoesntContainObject(ctx, addr); err != nil { - if skipFailed { - continue - } - return result, err - } - - if err := s.deleteFromBlobstor(ctx, addr); err != nil { - if skipFailed { - continue - } - return result, err - } - - if err := s.deleteFromMetabase(ctx, addr); err != nil { - if skipFailed { - continue - } - return result, err - } - result.deleted++ - } - - return result, nil -} - -func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr oid.Address) error { - if !s.hasWriteCache() { - return nil - } - _, err := s.writeCache.Head(ctx, addr) - if err == nil { - s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr)) - return fmt.Errorf("object %s must be flushed from writecache", addr) - } - if client.IsErrObjectNotFound(err) { - return nil - } - return err -} - -func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error { - var sPrm meta.StorageIDPrm - sPrm.SetAddress(addr) - - res, err := s.metaBase.StorageID(ctx, sPrm) - if err != nil { - s.log.Debug(ctx, logs.StorageIDRetrievalFailure, - zap.Stringer("object", addr), - zap.Error(err)) - return err - } - storageID := res.StorageID() - if storageID == nil { - // if storageID is nil it means: - // 1. there is no such object - // 2. 
object stored by writecache: should not happen, as `validateWritecacheDoesntContainObject` called before `deleteFromBlobstor` - return nil - } - - var delPrm common.DeletePrm - delPrm.Address = addr - delPrm.StorageID = storageID - - _, err = s.blobStor.Delete(ctx, delPrm) - if err != nil && !client.IsErrObjectNotFound(err) { - s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, - zap.Stringer("object_address", addr), - zap.Error(err)) - return err - } - return nil -} - -func (s *Shard) deleteFromMetabase(ctx context.Context, addr oid.Address) error { - var delPrm meta.DeletePrm - delPrm.SetAddresses(addr) - - res, err := s.metaBase.Delete(ctx, delPrm) - if err != nil { - return err - } - s.decObjectCounterBy(physical, res.PhyCount()) - s.decObjectCounterBy(logical, res.LogicCount()) - s.decObjectCounterBy(user, res.UserCount()) - s.decContainerObjectCounter(res.RemovedByCnrID()) - s.addToContainerSize(addr.Container().EncodeToString(), -int64(res.LogicSize())) - s.addToPayloadSize(-int64(res.PhySize())) - - return nil -} diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go deleted file mode 100644 index c9ce93bc5..000000000 --- a/pkg/local_object_storage/shard/delete_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package shard - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func TestShard_Delete_SmallObject(t *testing.T) { - t.Run("small object without write cache", func(t *testing.T) { - t.Parallel() - testShard(t, false, 1<<5) - }) - - t.Run("small object with write cache", func(t *testing.T) { - t.Parallel() - testShard(t, true, 1<<5) - }) -} - -func TestShard_Delete_BigObject(t *testing.T) { - t.Run("big object without write cache", func(t *testing.T) { - t.Parallel() - testShard(t, false, 1<<20) - }) - - t.Run("big object with write cache", func(t *testing.T) { - t.Parallel() - testShard(t, true, 1<<20) - }) -} - -func testShard(t *testing.T, hasWriteCache bool, payloadSize int) { - sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - - obj := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(obj, "foo", "bar") - testutil.AddPayload(obj, payloadSize) - - var putPrm PutPrm - putPrm.SetObject(obj) - - var getPrm GetPrm - getPrm.SetAddress(object.AddressOf(obj)) - - var delPrm DeletePrm - delPrm.SetAddresses(object.AddressOf(obj)) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - _, err = sh.Get(context.Background(), getPrm) - require.NoError(t, err) - - if hasWriteCache { - require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})) - } - _, err = sh.Delete(context.Background(), delPrm) - require.NoError(t, err) - - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err)) -} diff --git a/pkg/local_object_storage/shard/errors.go b/pkg/local_object_storage/shard/errors.go deleted file mode 100644 index 045ad1bba..000000000 --- a/pkg/local_object_storage/shard/errors.go +++ /dev/null @@ -1,24 +0,0 @@ -package shard - -import ( - "errors" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -var ErrShardDisabled = logicerr.New("shard disabled") - -// IsErrOutOfRange checks if an error returned by Shard GetRange method -// corresponds to exceeding the object bounds. -func IsErrOutOfRange(err error) bool { - var target *apistatus.ObjectOutOfRange - return errors.As(err, &target) -} - -// IsErrObjectExpired checks if an error returned by Shard corresponds to -// expired object. -func IsErrObjectExpired(err error) bool { - return errors.Is(err, meta.ErrObjectIsExpired) -} diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go deleted file mode 100644 index 2c11b6b01..000000000 --- a/pkg/local_object_storage/shard/exists.go +++ /dev/null @@ -1,96 +0,0 @@ -package shard - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// ExistsPrm groups the parameters of Exists operation. -type ExistsPrm struct { - // Exists option to set object checked for existence. - Address oid.Address - // Exists option to set parent object checked for existence. - ECParentAddress oid.Address -} - -// ExistsRes groups the resulting values of Exists operation. -type ExistsRes struct { - ex bool - lc bool -} - -// Exists returns the fact that the object is in the shard. -func (p ExistsRes) Exists() bool { - return p.ex -} - -// Locked returns the fact that the object is locked. -func (p ExistsRes) Locked() bool { - return p.lc -} - -// Exists checks if object is presented in shard. -// -// Returns any error encountered that does not allow to -// unambiguously determine the presence of an object. -// -// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed. -// Returns the object.ErrObjectIsExpired if the object is presented but already expired. -// Returns the ErrShardDisabled if the shard is disabled. 
-func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("address", prm.Address.EncodeToString()), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.Disabled() { - return ExistsRes{}, ErrShardDisabled - } else if s.info.EvacuationInProgress { - return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ExistsRes{}, err - } - defer release() - - var exists bool - var locked bool - - if s.info.Mode.NoMetabase() { - var p common.ExistsPrm - p.Address = prm.Address - - var res common.ExistsRes - res, err = s.blobStor.Exists(ctx, p) - exists = res.Exists - } else { - var existsPrm meta.ExistsPrm - existsPrm.SetAddress(prm.Address) - existsPrm.SetECParent(prm.ECParentAddress) - - var res meta.ExistsRes - res, err = s.metaBase.Exists(ctx, existsPrm) - exists = res.Exists() - locked = res.Locked() - } - - return ExistsRes{ - ex: exists, - lc: locked, - }, err -} diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go deleted file mode 100644 index a262a52cb..000000000 --- a/pkg/local_object_storage/shard/gc.go +++ /dev/null @@ -1,809 +0,0 @@ -package shard - -import ( - "context" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -const ( - minExpiredWorkers = 2 - minExpiredBatchSize = 1 -) - -// TombstoneSource is an interface that checks -// tombstone status in the FrostFS network. -type TombstoneSource interface { - // IsTombstoneAvailable must return boolean value that means - // provided tombstone's presence in the FrostFS network at the - // time of the passed epoch. 
- IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool -} - -type newEpochHandler func(context.Context, uint64) - -type newEpochHandlers struct { - prevGroup sync.WaitGroup - - cancelFunc context.CancelFunc - - handlers []newEpochHandler -} - -type gcRunResult struct { - success bool - deleted uint64 - failedToDelete uint64 -} - -const ( - objectTypeLock = "lock" - objectTypeTombstone = "tombstone" - objectTypeRegular = "regular" -) - -type GCMectrics interface { - SetShardID(string) - AddRunDuration(d time.Duration, success bool) - AddDeletedCount(deleted, failed uint64) - AddExpiredObjectCollectionDuration(d time.Duration, success bool, objectType string) - AddInhumedObjectCount(count uint64, objectType string) -} - -type noopGCMetrics struct{} - -func (m *noopGCMetrics) SetShardID(string) {} -func (m *noopGCMetrics) AddRunDuration(time.Duration, bool) {} -func (m *noopGCMetrics) AddDeletedCount(uint64, uint64) {} -func (m *noopGCMetrics) AddExpiredObjectCollectionDuration(time.Duration, bool, string) {} -func (m *noopGCMetrics) AddInhumedObjectCount(uint64, string) {} - -type gc struct { - *gcCfg - - onceStop sync.Once - stopChannel chan struct{} - wg sync.WaitGroup - - workerPool util.WorkerPool - - remover func(context.Context) gcRunResult - - // newEpochChan is used only for listening for the new epoch event. - // It is ok to keep opened, we are listening for context done when writing in it. - newEpochChan chan uint64 - newEpochHandlers *newEpochHandlers -} - -type gcCfg struct { - removerInterval time.Duration - - log *logger.Logger - - workerPoolInit func(int) util.WorkerPool - - expiredCollectorWorkerCount int - expiredCollectorBatchSize int - - metrics GCMectrics - - testHookRemover func(ctx context.Context) gcRunResult -} - -func defaultGCCfg() gcCfg { - return gcCfg{ - removerInterval: 10 * time.Second, - log: logger.NewLoggerWrapper(zap.L()), - workerPoolInit: func(int) util.WorkerPool { - return nil - }, - metrics: &noopGCMetrics{}, - } -} - -func (gc *gc) init(ctx context.Context) { - gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) - gc.wg.Add(2) - go gc.tickRemover(ctx) - go gc.listenEvents(ctx) -} - -func (gc *gc) listenEvents(ctx context.Context) { - defer gc.wg.Done() - - for { - select { - case <-gc.stopChannel: - gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel) - return - case <-ctx.Done(): - gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) - return - case event, ok := <-gc.newEpochChan: - if !ok { - gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) - return - } - - gc.handleEvent(ctx, event) - } - } -} - -func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() - - var runCtx context.Context - runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) - - gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) - - for i := range gc.newEpochHandlers.handlers { - select { - case <-ctx.Done(): - return - default: - } - h := gc.newEpochHandlers.handlers[i] - - err := gc.workerPool.Submit(func() { - defer gc.newEpochHandlers.prevGroup.Done() - h(runCtx, epoch) - }) - if err != nil { - gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, - zap.Error(err), - ) - - gc.newEpochHandlers.prevGroup.Done() - } - } -} - -func (gc *gc) releaseResources(ctx context.Context) { - if gc.workerPool != nil { - gc.workerPool.Release() 
- } - - // Avoid to close gc.eventChan here, - // because it is possible that we are close it earlier than stop writing. - // It is ok to keep it opened. - - gc.log.Debug(ctx, logs.ShardGCIsStopped) -} - -func (gc *gc) tickRemover(ctx context.Context) { - defer gc.wg.Done() - - timer := time.NewTimer(gc.removerInterval) - defer timer.Stop() - - for { - select { - case <-ctx.Done(): - // Context canceled earlier than we start to close shards. - // It make sense to stop collecting garbage by context too. - gc.releaseResources(ctx) - return - case <-gc.stopChannel: - gc.releaseResources(ctx) - return - case <-timer.C: - startedAt := time.Now() - - var result gcRunResult - if gc.testHookRemover != nil { - result = gc.testHookRemover(ctx) - } else { - result = gc.remover(ctx) - } - timer.Reset(gc.removerInterval) - - gc.metrics.AddRunDuration(time.Since(startedAt), result.success) - gc.metrics.AddDeletedCount(result.deleted, result.failedToDelete) - } - } -} - -func (gc *gc) stop(ctx context.Context) { - gc.onceStop.Do(func() { - close(gc.stopChannel) - }) - - gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) - gc.wg.Wait() - - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() -} - -// iterates over metabase and deletes objects -// with GC-marked graves. -// Does nothing if shard is in "read-only" mode. -func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - s.gcCancel.Store(cancel) - if s.setModeRequested.Load() { - return - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode != mode.ReadWrite { - return - } - - s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) - defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) - - buf, err := s.getGarbage(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, - zap.Error(err), - ) - - return - } else if len(buf) == 0 { - result.success = true - return - } - - var deletePrm DeletePrm - deletePrm.SetAddresses(buf...) 
- - // delete accumulated objects - res, err := s.delete(ctx, deletePrm, true) - - result.deleted = res.deleted - result.failedToDelete = uint64(len(buf)) - res.deleted - result.success = true - - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects, - zap.Error(err), - ) - result.success = false - } - - return -} - -func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - - buf := make([]oid.Address, 0, s.rmBatchSize) - - var iterPrm meta.GarbageIterationPrm - iterPrm.SetHandler(func(g meta.GarbageObject) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - buf = append(buf, g.Address()) - - if len(buf) == s.rmBatchSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { - return nil, err - } - - return buf, nil -} - -func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) - return -} - -func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { - var err error - startedAt := time.Now() - - defer func() { - s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) - }() - - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) - - workersCount, batchSize := s.getExpiredObjectsParameters() - - errGroup, egCtx := errgroup.WithContext(ctx) - errGroup.SetLimit(workersCount) - - errGroup.Go(func() error { - batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { - if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { - batch = append(batch, o.Address()) - - if len(batch) == batchSize { - expired := batch - errGroup.Go(func() error { - s.handleExpiredObjects(egCtx, expired) - return egCtx.Err() - }) - batch = make([]oid.Address, 0, batchSize) - } - } - }) - if expErr != nil { - return expErr - } - - if len(batch) > 0 { - expired := batch - errGroup.Go(func() error { - s.handleExpiredObjects(egCtx, expired) - return egCtx.Err() - }) - } - - return nil - }) - - if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err)) - } -} - -func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) { - select { - case <-ctx.Done(): - return - default: - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return - } - - s.handleExpiredObjectsUnsafe(ctx, expired) -} - -func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { - select { - case <-ctx.Done(): - return - default: - } - - expired, err := s.getExpiredWithLinked(ctx, expired) - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) - return - } - - res, err := s.inhumeGC(ctx, expired) - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) - return - } - - s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeRegular) - s.decObjectCounterBy(logical, res.LogicInhumed()) - s.decObjectCounterBy(user, res.UserInhumed()) - 
s.decContainerObjectCounter(res.InhumedByCnrID()) - - i := 0 - for i < res.GetDeletionInfoLength() { - delInfo := res.GetDeletionInfoByIndex(i) - s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) - i++ - } -} - -func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - - result := make([]oid.Address, 0, len(source)) - parentToChildren, err := s.metaBase.GetChildren(ctx, source) - if err != nil { - return nil, err - } - for parent, children := range parentToChildren { - result = append(result, parent) - result = append(result, children...) - } - - return result, nil -} - -func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return meta.InhumeRes{}, err - } - defer release() - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(addrs...) - inhumePrm.SetGCMark() - return s.metaBase.Inhume(ctx, inhumePrm) -} - -func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { - var err error - startedAt := time.Now() - - defer func() { - s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) - }() - - log := s.log.With(zap.Uint64("epoch", epoch)) - - log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) - defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling) - - const tssDeleteBatch = 50 - tss := make([]meta.TombstonedObject, 0, tssDeleteBatch) - tssExp := make([]meta.TombstonedObject, 0, tssDeleteBatch) - - var iterPrm meta.GraveyardIterationPrm - iterPrm.SetHandler(func(deletedObject meta.TombstonedObject) error { - tss = append(tss, deletedObject) - - if len(tss) == tssDeleteBatch { - return meta.ErrInterruptIterator - } - - return nil - }) - - for { - log.Debug(ctx, logs.ShardIteratingTombstones) - - s.m.RLock() - - if s.info.Mode.NoMetabase() { - s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) - s.m.RUnlock() - - return - } - - var release qos.ReleaseFunc - release, err = s.opsLimiter.ReadRequest(ctx) - if err != nil { - log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) - s.m.RUnlock() - return - } - err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) - release() - if err != nil { - log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) - s.m.RUnlock() - return - } - - s.m.RUnlock() - - tssLen := len(tss) - if tssLen == 0 { - break - } - - for _, ts := range tss { - if !s.tsSource.IsTombstoneAvailable(ctx, ts.Tombstone(), epoch) { - tssExp = append(tssExp, ts) - } - } - - log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) - if len(tssExp) > 0 { - s.expiredTombstonesCallback(ctx, tssExp) - } - - iterPrm.SetOffset(tss[tssLen-1].Address()) - tss = tss[:0] - tssExp = tssExp[:0] - } -} - -func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { - var err error - startedAt := time.Now() - - defer func() { - s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) - }() - - s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) - - workersCount, batchSize := s.getExpiredObjectsParameters() - - errGroup, egCtx := errgroup.WithContext(ctx) - 
errGroup.SetLimit(workersCount) - - errGroup.Go(func() error { - batch := make([]oid.Address, 0, batchSize) - - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { - if o.Type() == objectSDK.TypeLock { - batch = append(batch, o.Address()) - - if len(batch) == batchSize { - expired := batch - errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) - return egCtx.Err() - }) - batch = make([]oid.Address, 0, batchSize) - } - } - }) - if expErr != nil { - return expErr - } - - if len(batch) > 0 { - expired := batch - errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) - return egCtx.Err() - }) - } - - return nil - }) - - if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err)) - } -} - -func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFound func(*meta.ExpiredObject)) error { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - - err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { - select { - case <-ctx.Done(): - return meta.ErrInterruptIterator - default: - onExpiredFound(expiredObject) - return nil - } - }) - if err != nil { - return err - } - return ctx.Err() -} - -func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - - return s.metaBase.FilterExpired(ctx, epoch, addresses) -} - -// HandleExpiredTombstones marks tombstones themselves as garbage -// and clears up corresponding graveyard records. -// -// Does not modify tss. -func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) - return - } - res, err := s.metaBase.InhumeTombstones(ctx, tss) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) - return - } - - s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeTombstone) - s.decObjectCounterBy(logical, res.LogicInhumed()) - s.decObjectCounterBy(user, res.UserInhumed()) - s.decContainerObjectCounter(res.InhumedByCnrID()) - - i := 0 - for i < res.GetDeletionInfoLength() { - delInfo := res.GetDeletionInfoByIndex(i) - s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) - i++ - } -} - -// HandleExpiredLocks unlocks all objects which were locked by lockers. -// If successful, marks lockers themselves as garbage. -func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) - return - } - unlocked, err := s.metaBase.FreeLockedBy(lockers) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) - - return - } - - var pInhume meta.InhumePrm - pInhume.SetAddresses(lockers...) 
- pInhume.SetForceGCMark() - release, err = s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) - return - } - res, err := s.metaBase.Inhume(ctx, pInhume) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) - return - } - - s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeLock) - s.decObjectCounterBy(logical, res.LogicInhumed()) - s.decObjectCounterBy(user, res.UserInhumed()) - s.decContainerObjectCounter(res.InhumedByCnrID()) - - i := 0 - for i < res.GetDeletionInfoLength() { - delInfo := res.GetDeletionInfoByIndex(i) - s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) - i++ - } - - s.inhumeUnlockedIfExpired(ctx, epoch, unlocked) -} - -func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) { - expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) - return - } - - if len(expiredUnlocked) == 0 { - return - } - - s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) -} - -// HandleDeletedLocks unlocks all objects which were locked by lockers. -func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) - return - } - _, err = s.metaBase.FreeLockedBy(lockers) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) - return - } -} - -// NotificationChannel returns channel for new epoch events. 
-func (s *Shard) NotificationChannel() chan<- uint64 { - return s.gc.newEpochChan -} - -func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { - ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") - defer span.End() - - s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) - - s.collectExpiredContainerSizeMetrics(ctx, epoch) - s.collectExpiredContainerCountMetrics(ctx, epoch) -} - -func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } - ids, err := s.metaBase.ZeroSizeContainers(ctx) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } - if len(ids) == 0 { - return - } - s.zeroSizeContainersCallback(ctx, ids) -} - -func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } - ids, err := s.metaBase.ZeroCountContainers(ctx) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } - if len(ids) == 0 { - return - } - s.zeroCountContainersCallback(ctx, ids) -} diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go deleted file mode 100644 index 54d2f1510..000000000 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package shard - -import ( - "context" - "path/filepath" - "testing" - "time" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/panjf2000/ants/v2" - "github.com/stretchr/testify/require" -) - -func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { - t.Parallel() - - rootPath := t.TempDir() - - var sh *Shard - l := test.NewLogger(t) - blobOpts := []blobstor.Option{ - blobstor.WithLogger(test.NewLogger(t)), - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - context.Background(), - 
blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), - blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(1), - blobovniczatree.WithBlobovniczaShallowWidth(1)), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return len(data) <= 1<<20 - }, - }, - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(rootPath, "blob"))), - }, - }), - } - - opts := []Option{ - WithID(NewIDFromBytes([]byte{})), - WithLogger(l), - WithBlobStorOptions(blobOpts...), - WithMetaBaseOptions( - meta.WithPath(filepath.Join(rootPath, "meta")), - meta.WithEpochState(epochState{}), - ), - WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))), - WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(ctx, addresses) - }), - WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { - sh.HandleExpiredLocks(ctx, epoch, a) - }), - WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - require.NoError(t, err) - return pool - }), - WithGCRemoverSleepInterval(1 * time.Second), - WithDisabledGC(), - } - - sh = New(opts...) - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - obj := testutil.GenerateObjectWithCID(cnr) - objID, _ := obj.ID() - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(objID) - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - var getPrm GetPrm - getPrm.SetAddress(objectCore.AddressOf(obj)) - _, err = sh.Get(context.Background(), getPrm) - require.NoError(t, err, "failed to get") - - // inhume - var inhumePrm InhumePrm - inhumePrm.MarkAsGarbage(addr) - _, err = sh.Inhume(context.Background(), inhumePrm) - require.NoError(t, err, "failed to inhume") - _, err = sh.Get(context.Background(), getPrm) - require.Error(t, err, "get returned error") - require.True(t, client.IsErrObjectNotFound(err), "invalid error type") - - // storageID - var metaStIDPrm meta.StorageIDPrm - metaStIDPrm.SetAddress(addr) - storageID, err := sh.metaBase.StorageID(context.Background(), metaStIDPrm) - require.NoError(t, err, "failed to get storage ID") - - // check existence in blobstore - var bsExisted common.ExistsPrm - bsExisted.Address = addr - bsExisted.StorageID = storageID.StorageID() - exRes, err := sh.blobStor.Exists(context.Background(), bsExisted) - require.NoError(t, err, "failed to check blobstore existence") - require.True(t, exRes.Exists, "invalid blobstore existence result") - - // drop from blobstor - var bsDeletePrm common.DeletePrm - bsDeletePrm.Address = addr - bsDeletePrm.StorageID = storageID.StorageID() - _, err = sh.blobStor.Delete(context.Background(), bsDeletePrm) - require.NoError(t, err, "failed to delete from blobstore") - - // check existence in blobstore - exRes, err = sh.blobStor.Exists(context.Background(), bsExisted) - require.NoError(t, err, "failed to check blobstore existence") - require.False(t, exRes.Exists, "invalid blobstore existence result") - - // get should return object not found - _, err = sh.Get(context.Background(), getPrm) - require.Error(t, err, "get returned no error") - require.True(t, client.IsErrObjectNotFound(err), "invalid error type") -} diff --git 
a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go deleted file mode 100644 index f512a488a..000000000 --- a/pkg/local_object_storage/shard/gc_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package shard - -import ( - "context" - "errors" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { - t.Parallel() - - epoch := &epochState{ - Value: 100, - } - - sh := newCustomShard(t, false, shardOptions{ - metaOptions: []meta.Option{meta.WithEpochState(epoch)}, - additionalShardOptions: []Option{WithGCWorkerPoolInitializer(func(int) util.WorkerPool { - return util.NewPseudoWorkerPool() // synchronous event processing - })}, - }) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - - var objExpirationAttr objectSDK.Attribute - objExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) - objExpirationAttr.SetValue("101") - - obj := testutil.GenerateObjectWithCID(cnr) - obj.SetAttributes(objExpirationAttr) - objID, _ := obj.ID() - - var lockExpirationAttr objectSDK.Attribute - lockExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) - lockExpirationAttr.SetValue("103") - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - lock.SetAttributes(lockExpirationAttr) - lockID, _ := lock.ID() - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID}) - require.NoError(t, err) - - putPrm.SetObject(lock) - _, err = sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) - - var getPrm GetPrm - getPrm.SetAddress(objectCore.AddressOf(obj)) - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired object must be deleted") -} - -func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { - t.Parallel() - - epoch := &epochState{ - Value: 100, - } - - cnr := cidtest.ID() - parentID := oidtest.ID() - splitID := objectSDK.NewSplitID() - - var objExpirationAttr objectSDK.Attribute - objExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) - objExpirationAttr.SetValue("101") - - var lockExpirationAttr objectSDK.Attribute - lockExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) - lockExpirationAttr.SetValue("103") - - parent := testutil.GenerateObjectWithCID(cnr) - parent.SetID(parentID) - parent.SetPayload(nil) - 
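// (Comment added for exposition.) The test below assembles a complex object:
// ten children chained via SetPreviousID, the last child carrying the parent
// header, plus a link object referencing every child, so that reading the
// parent address yields a SplitInfoError until GC drops the expired parts.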
parent.SetAttributes(objExpirationAttr) - - const childCount = 10 - children := make([]*objectSDK.Object, childCount) - childIDs := make([]oid.ID, childCount) - for i := range children { - children[i] = testutil.GenerateObjectWithCID(cnr) - if i != 0 { - children[i].SetPreviousID(childIDs[i-1]) - } - if i == len(children)-1 { - children[i].SetParent(parent) - } - children[i].SetSplitID(splitID) - children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)}) - childIDs[i], _ = children[i].ID() - } - - link := testutil.GenerateObjectWithCID(cnr) - link.SetParent(parent) - link.SetParentID(parentID) - link.SetSplitID(splitID) - link.SetChildren(childIDs...) - - linkID, _ := link.ID() - - sh := newCustomShard(t, false, shardOptions{ - metaOptions: []meta.Option{meta.WithEpochState(epoch)}, - additionalShardOptions: []Option{WithGCWorkerPoolInitializer(func(int) util.WorkerPool { - return util.NewPseudoWorkerPool() // synchronous event processing - })}, - }) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - lock.SetAttributes(lockExpirationAttr) - lockID, _ := lock.ID() - - var putPrm PutPrm - - for _, child := range children { - putPrm.SetObject(child) - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - } - - putPrm.SetObject(link) - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - err = sh.Lock(context.Background(), cnr, lockID, append(childIDs, parentID, linkID)) - require.NoError(t, err) - - putPrm.SetObject(lock) - _, err = sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - var getPrm GetPrm - getPrm.SetAddress(objectCore.AddressOf(parent)) - - _, err = sh.Get(context.Background(), getPrm) - var splitInfoError *objectSDK.SplitInfoError - require.True(t, errors.As(err, &splitInfoError), "split info must be provided") - - epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) - - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") -} - -func TestGCDropsObjectInhumedFromWritecache(t *testing.T) { - t.Parallel() - - t.Run("flush write-cache before inhume", func(t *testing.T) { - t.Parallel() - testGCDropsObjectInhumedFromWritecache(t, true) - }) - - t.Run("don't flush write-cache before inhume", func(t *testing.T) { - t.Parallel() - testGCDropsObjectInhumedFromWritecache(t, false) - }) -} - -func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) { - sh := newCustomShard(t, true, shardOptions{ - additionalShardOptions: []Option{WithDisabledGC()}, - wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, - }) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - obj := testutil.GenerateObjectWithSize(1024) - - var putPrm PutPrm - putPrm.SetObject(obj) - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - // writecache stores object - wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj)) - require.NoError(t, err) - require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj)) - - // blobstore doesn't store object - bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{ - Address: objectCore.AddressOf(obj), - }) - require.ErrorAs(t, err, new(*apistatus.ObjectNotFound)) - require.Nil(t, bsRes.Object) - require.Nil(t, 
bsRes.RawData) - - if flushbeforeInhume { - sh.writeCache.Flush(context.Background(), false, false) - } - - var inhumePrm InhumePrm - inhumePrm.MarkAsGarbage(objectCore.AddressOf(obj)) - _, err = sh.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - // writecache doesn't store object - wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj)) - require.Error(t, err) - require.Nil(t, wcObj) - - if flushbeforeInhume { - // blobstore store object - bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{ - Address: objectCore.AddressOf(obj), - }) - require.NoError(t, err) - require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(bsRes.Object)) - } else { - - // blobstore doesn't store object - bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{ - Address: objectCore.AddressOf(obj), - }) - require.ErrorAs(t, err, new(*apistatus.ObjectNotFound)) - require.Nil(t, bsRes.Object) - require.Nil(t, bsRes.RawData) - } - - gcRes := sh.removeGarbage(context.Background()) - require.True(t, gcRes.success) - require.Equal(t, uint64(1), gcRes.deleted) -} - -func TestGCDontDeleteObjectFromWritecache(t *testing.T) { - sh := newCustomShard(t, true, shardOptions{ - additionalShardOptions: []Option{WithDisabledGC()}, - wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, - }) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - obj := testutil.GenerateObjectWithSize(1024) - - var putPrm PutPrm - putPrm.SetObject(obj) - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - // writecache stores object - wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj)) - require.NoError(t, err) - require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj)) - - // blobstore doesn't store object - bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{ - Address: objectCore.AddressOf(obj), - }) - require.ErrorAs(t, err, new(*apistatus.ObjectNotFound)) - require.Nil(t, bsRes.Object) - require.Nil(t, bsRes.RawData) - - var metaInhumePrm meta.InhumePrm - metaInhumePrm.SetAddresses(objectCore.AddressOf(obj)) - metaInhumePrm.SetLockObjectHandling() - metaInhumePrm.SetGCMark() - _, err = sh.metaBase.Inhume(context.Background(), metaInhumePrm) - require.NoError(t, err) - - // logs: WARN shard/delete.go:98 can't remove object: object must be flushed from writecache - gcRes := sh.removeGarbage(context.Background()) - require.True(t, gcRes.success) - require.Equal(t, uint64(0), gcRes.deleted) - - // writecache stores object - wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj)) - require.NoError(t, err) - require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj)) -} diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go deleted file mode 100644 index 28f8912be..000000000 --- a/pkg/local_object_storage/shard/get.go +++ /dev/null @@ -1,195 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// storFetcher is a type to unify object fetching mechanism in `fetchObjectData` -// method. It represents generalization of `getSmall` and `getBig` methods. -type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) - -// GetPrm groups the parameters of Get operation. -type GetPrm struct { - addr oid.Address - skipMeta bool - skipEvacCheck bool -} - -// GetRes groups the resulting values of Get operation. -type GetRes struct { - obj *objectSDK.Object - hasMeta bool -} - -// SetAddress is a Get option to set the address of the requested object. -// -// Option is required. -func (p *GetPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetIgnoreMeta is a Get option try to fetch object from blobstor directly, -// without accessing metabase. -func (p *GetPrm) SetIgnoreMeta(ignore bool) { - p.skipMeta = ignore -} - -// SkipEvacCheck is a Get option which instruct to skip check is evacuation in progress. -func (p *GetPrm) SkipEvacCheck(val bool) { - p.skipEvacCheck = val -} - -// Object returns the requested object. -func (r GetRes) Object() *objectSDK.Object { - return r.obj -} - -// HasMeta returns true if info about the object was found in the metabase. -func (r GetRes) HasMeta() bool { - return r.hasMeta -} - -// Get reads an object from shard. -// -// Returns any error encountered that -// did not allow to completely read the object part. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in shard. -// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard. -// Returns the object.ErrObjectIsExpired if the object is presented but already expired. -// Returns the ErrShardDisabled if the shard is disabled. 
-func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Get", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("address", prm.addr.EncodeToString()), - attribute.Bool("skip_meta", prm.skipMeta), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.Disabled() { - return GetRes{}, ErrShardDisabled - } - - if s.info.EvacuationInProgress && !prm.skipEvacCheck { - return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) { - var getPrm common.GetPrm - getPrm.Address = prm.addr - getPrm.StorageID = id - - res, err := stor.Get(ctx, getPrm) - if err != nil { - return nil, err - } - - return res.Object, nil - } - - wc := func(c writecache.Cache) (*objectSDK.Object, error) { - return c.Get(ctx, prm.addr) - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return GetRes{}, err - } - defer release() - - skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() - obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) - - return GetRes{ - obj: obj, - hasMeta: hasMeta, - }, err -} - -// emptyStorageID is an empty storageID that indicates that -// an object is big (and is stored in an FSTree, not in a blobovnicza). -var emptyStorageID = make([]byte, 0) - -// fetchObjectData looks through writeCache and blobStor to find object. -func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) { - var ( - mErr error - mRes meta.ExistsRes - ) - - if !skipMeta { - var mPrm meta.ExistsPrm - mPrm.SetAddress(addr) - mRes, mErr = s.metaBase.Exists(ctx, mPrm) - if mErr != nil && !s.info.Mode.NoMetabase() { - return nil, false, mErr - } - - if !mRes.Exists() { - return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - } else { - s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) - } - - if s.hasWriteCache() { - res, err := wc(s.writeCache) - if err == nil || IsErrOutOfRange(err) { - return res, false, err - } - if client.IsErrObjectNotFound(err) { - s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, - zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta)) - } else { - s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, - zap.Error(err), - zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta)) - } - } - if skipMeta || mErr != nil { - res, err := cb(s.blobStor, nil) - return res, false, err - } - - var mPrm meta.StorageIDPrm - mPrm.SetAddress(addr) - - mExRes, err := s.metaBase.StorageID(ctx, mPrm) - if err != nil { - return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err) - } - - storageID := mExRes.StorageID() - if storageID == nil { - // `nil` storageID returned without any error - // means that object is big, `cb` expects an - // empty (but non-nil) storageID in such cases - storageID = emptyStorageID - } - - res, err := cb(s.blobStor, storageID) - - return res, true, err -} diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go deleted file mode 100644 index 837991b73..000000000 --- a/pkg/local_object_storage/shard/get_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package shard - -import ( - "bytes" - "context" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" 
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func TestShard_Get(t *testing.T) { - t.Parallel() - - t.Run("without write cache", func(t *testing.T) { - t.Parallel() - testShardGet(t, false) - }) - - t.Run("with write cache", func(t *testing.T) { - t.Parallel() - testShardGet(t, true) - }) -} - -func testShardGet(t *testing.T, hasWriteCache bool) { - sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - var putPrm PutPrm - var getPrm GetPrm - - t.Run("small object", func(t *testing.T) { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - testutil.AddPayload(obj, 1<<5) - - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - getPrm.SetAddress(object.AddressOf(obj)) - - res, err := sh.Get(context.Background(), getPrm) - require.NoError(t, err) - require.Equal(t, obj, res.Object()) - }) - - t.Run("big object", func(t *testing.T) { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - obj.SetID(oidtest.ID()) - testutil.AddPayload(obj, 1<<20) // big obj - - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - getPrm.SetAddress(object.AddressOf(obj)) - - res, err := sh.Get(context.Background(), getPrm) - require.NoError(t, err) - require.Equal(t, obj, res.Object()) - }) - - t.Run("parent object", func(t *testing.T) { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "parent", "attribute") - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - child.SetSplitID(splitID) - testutil.AddPayload(child, 1<<5) - - putPrm.SetObject(child) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - getPrm.SetAddress(object.AddressOf(child)) - - res, err := sh.Get(context.Background(), getPrm) - require.NoError(t, err) - require.True(t, binaryEqual(child, res.Object())) - - getPrm.SetAddress(object.AddressOf(parent)) - - _, err = sh.Get(context.Background(), getPrm) - - var si *objectSDK.SplitInfoError - require.True(t, errors.As(err, &si)) - - _, ok := si.SplitInfo().Link() - require.False(t, ok) - id1, _ := child.ID() - id2, _ := si.SplitInfo().LastPart() - require.Equal(t, id1, id2) - require.Equal(t, splitID, si.SplitInfo().SplitID()) - }) -} - -// binary equal is used when object contains empty lists in the structure and -// requre.Equal fails on comparing and []{} lists. 
-func binaryEqual(a, b *objectSDK.Object) bool { - binaryA, err := a.Marshal() - if err != nil { - return false - } - - binaryB, err := b.Marshal() - if err != nil { - return false - } - - return bytes.Equal(binaryA, binaryB) -} diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go deleted file mode 100644 index 34b8290d6..000000000 --- a/pkg/local_object_storage/shard/head.go +++ /dev/null @@ -1,98 +0,0 @@ -package shard - -import ( - "context" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// HeadPrm groups the parameters of Head operation. -type HeadPrm struct { - addr oid.Address - raw bool - ShardLooksBad bool -} - -// HeadRes groups the resulting values of Head operation. -type HeadRes struct { - obj *objectSDK.Object -} - -// SetAddress is a Head option to set the address of the requested object. -// -// Option is required. -func (p *HeadPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetRaw is a Head option to set raw flag value. If flag is unset, then Head -// returns header of virtual object, otherwise it returns SplitInfo of virtual -// object. -func (p *HeadPrm) SetRaw(raw bool) { - p.raw = raw -} - -// Object returns the requested object header. -func (r HeadRes) Object() *objectSDK.Object { - return r.obj -} - -// Head reads header of the object from the shard. -// -// Returns any error encountered. -// -// Returns an error of type apistatus.ObjectNotFound if object is missing in Shard. -// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard. -// Returns the object.ErrObjectIsExpired if the object is present but already expired.
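//
// An illustrative raw lookup of a virtual object (added for exposition; mirrors
// testShardHead below, variable names assumed):
//
//	var prm HeadPrm
//	prm.SetAddress(parentAddr)
//	prm.SetRaw(true)
//	_, err := sh.Head(ctx, prm)
//	var si *objectSDK.SplitInfoError
//	_ = errors.As(err, &si) // raw Head on a virtual object yields its SplitInfo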
-func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Head", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("address", prm.addr.EncodeToString()), - attribute.Bool("raw", prm.raw), - )) - defer span.End() - - var obj *objectSDK.Object - var err error - mode := s.GetMode() - if mode.NoMetabase() || (mode.ReadOnly() && prm.ShardLooksBad) { - var getPrm GetPrm - getPrm.SetAddress(prm.addr) - getPrm.SetIgnoreMeta(true) - - var res GetRes - res, err = s.Get(ctx, getPrm) - obj = res.Object() - } else { - s.m.RLock() - defer s.m.RUnlock() - if s.info.EvacuationInProgress { - return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - var headParams meta.GetPrm - headParams.SetAddress(prm.addr) - headParams.SetRaw(prm.raw) - - release, limitErr := s.opsLimiter.ReadRequest(ctx) - if limitErr != nil { - return HeadRes{}, limitErr - } - defer release() - - var res meta.GetRes - res, err = s.metaBase.Get(ctx, headParams) - obj = res.Header() - } - - return HeadRes{ - obj: obj, - }, err -} diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go deleted file mode 100644 index deb3019df..000000000 --- a/pkg/local_object_storage/shard/head_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package shard - -import ( - "context" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestShard_Head(t *testing.T) { - t.Parallel() - - t.Run("without write cache", func(t *testing.T) { - t.Parallel() - testShardHead(t, false) - }) - - t.Run("with write cache", func(t *testing.T) { - t.Parallel() - testShardHead(t, true) - }) -} - -func testShardHead(t *testing.T, hasWriteCache bool) { - sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - var putPrm PutPrm - var headPrm HeadPrm - - t.Run("regular object", func(t *testing.T) { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - headPrm.SetAddress(object.AddressOf(obj)) - - res, err := sh.Head(context.Background(), headPrm) - require.NoError(t, err) - require.Equal(t, obj.CutPayload(), res.Object()) - }) - - t.Run("virtual object", func(t *testing.T) { - cnr := cidtest.ID() - splitID := objectSDK.NewSplitID() - - parent := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(parent, "foo", "bar") - - child := testutil.GenerateObjectWithCID(cnr) - child.SetParent(parent) - idParent, _ := parent.ID() - child.SetParentID(idParent) - child.SetSplitID(splitID) - - putPrm.SetObject(child) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - headPrm.SetAddress(object.AddressOf(parent)) - headPrm.SetRaw(true) - - var siErr *objectSDK.SplitInfoError - - _, err = sh.Head(context.Background(), headPrm) - require.True(t, errors.As(err, &siErr)) - - headPrm.SetAddress(object.AddressOf(parent)) - headPrm.SetRaw(false) - - head, err := sh.Head(context.Background(), headPrm) - require.NoError(t, err) - require.Equal(t, parent.CutPayload(), head.Object()) - }) -} diff --git 
a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go deleted file mode 100644 index 7391adef2..000000000 --- a/pkg/local_object_storage/shard/id.go +++ /dev/null @@ -1,72 +0,0 @@ -package shard - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/mr-tron/base58" - "go.uber.org/zap" -) - -// ID represents Shard identifier. -// -// Each shard should have the unique ID within -// a single instance of local storage. -type ID []byte - -// NewIDFromBytes constructs ID from byte slice. -func NewIDFromBytes(v []byte) *ID { - return (*ID)(&v) -} - -func (id ID) String() string { - return base58.Encode(id) -} - -// ID returns Shard identifier. -func (s *Shard) ID() *ID { - return s.info.ID -} - -// UpdateID reads shard ID saved in the metabase and updates it if it is missing. -func (s *Shard) UpdateID(ctx context.Context) (err error) { - var idFromMetabase []byte - modeDegraded := s.GetMode().NoMetabase() - if !modeDegraded { - if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil { - err = fmt.Errorf("read shard id from metabase: %w", err) - } - } - - if len(idFromMetabase) != 0 { - s.info.ID = NewIDFromBytes(idFromMetabase) - } - - shardID := s.info.ID.String() - s.metricsWriter.SetShardID(shardID) - if s.writeCache != nil && s.writeCache.GetMetrics() != nil { - s.writeCache.GetMetrics().SetShardID(shardID) - } - - s.log = s.log.With(zap.Stringer("shard_id", s.info.ID)) - s.metaBase.SetLogger(s.log) - s.blobStor.SetLogger(s.log) - if s.hasWriteCache() { - s.writeCache.SetLogger(s.log) - } - s.metaBase.SetParentID(s.info.ID.String()) - s.blobStor.SetParentID(s.info.ID.String()) - if s.pilorama != nil { - s.pilorama.SetParentID(s.info.ID.String()) - } - s.opsLimiter.SetParentID(s.info.ID.String()) - - if len(idFromMetabase) == 0 && !modeDegraded { - if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { - err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr)) - } - } - return -} diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go deleted file mode 100644 index f01796ec7..000000000 --- a/pkg/local_object_storage/shard/info.go +++ /dev/null @@ -1,41 +0,0 @@ -package shard - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" -) - -// Info groups the information about Shard. -type Info struct { - // Identifier of the shard. - ID *ID - - // Shard mode. - Mode mode.Mode - - // True when evacuation is in progress. - EvacuationInProgress bool - - // Information about the metabase. - MetaBaseInfo meta.Info - - // Information about the BLOB storage. - BlobStorInfo blobstor.Info - - // Information about the Write Cache. - WriteCacheInfo writecache.Info - - // ErrorCount contains amount of errors occurred in shard operations. - ErrorCount uint32 - - // PiloramaInfo contains information about trees stored on this shard. - PiloramaInfo pilorama.Info -} - -// DumpInfo returns information about the Shard. 
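//
// For example (added for exposition; logging target assumed):
//
//	info := sh.DumpInfo()
//	log.Printf("shard %v: mode=%v, errors=%d", info.ID, info.Mode, info.ErrorCount)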
-func (s *Shard) DumpInfo() Info { - return s.info -} diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go deleted file mode 100644 index c0fd65f4b..000000000 --- a/pkg/local_object_storage/shard/inhume.go +++ /dev/null @@ -1,144 +0,0 @@ -package shard - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// InhumePrm encapsulates parameters for inhume operation. -type InhumePrm struct { - target []oid.Address - tombstone *oid.Address - forceRemoval bool -} - -// InhumeRes encapsulates results of inhume operation. -type InhumeRes struct{} - -// SetTarget sets a list of objects that should be inhumed and the tombstone address -// as the reason for the inhume operation. -// -// tombstone should not be nil, addr should not be empty. -// Should not be called along with MarkAsGarbage. -func (p *InhumePrm) SetTarget(tombstone oid.Address, addrs ...oid.Address) { - p.target = addrs - p.tombstone = &tombstone -} - -// MarkAsGarbage marks objects to be physically removed from the shard. -// -// Should not be called along with SetTarget. -func (p *InhumePrm) MarkAsGarbage(addr ...oid.Address) { - if p != nil { - p.target = addr - p.tombstone = nil - } -} - -// ForceRemoval forces object removing despite any restrictions imposed -// on deleting that object. Expected to be used only in control service. -func (p *InhumePrm) ForceRemoval() { - if p != nil { - p.tombstone = nil - p.forceRemoval = true - } -} - -// ErrLockObjectRemoval is returned when the inhume operation is being -// performed on a lock object, and it is not a forced object removal. -var ErrLockObjectRemoval = meta.ErrLockObjectRemoval - -// Inhume calls the metabase.Inhume method to mark the object as removed. It won't be -// removed physically from blobStor and metabase until the `Delete` operation. -// -// Allows inhuming non-locked objects only. Returns apistatus.ObjectLocked -// if at least one object is locked. -// -// Returns ErrReadOnlyMode error if shard is in "read-only" mode. -func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - )) - defer span.End() - - s.m.RLock() - - if s.info.Mode.ReadOnly() { - s.m.RUnlock() - return InhumeRes{}, ErrReadOnlyMode - } else if s.info.Mode.NoMetabase() { - s.m.RUnlock() - return InhumeRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return InhumeRes{}, err - } - defer release() - - if s.hasWriteCache() { - for i := range prm.target { - _ = s.writeCache.Delete(ctx, prm.target[i]) - } - } - - var metaPrm meta.InhumePrm - metaPrm.SetAddresses(prm.target...)
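	// (Comment added for exposition.) Exactly one marking mode applies below:
	// a tombstone address when the targets were set via SetTarget, or a plain
	// GC mark for MarkAsGarbage; ForceRemoval additionally overrides the
	// restrictions that normally protect lock objects.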
- metaPrm.SetLockObjectHandling() - - if prm.tombstone != nil { - metaPrm.SetTombstoneAddress(*prm.tombstone) - } else { - metaPrm.SetGCMark() - } - - if prm.forceRemoval { - metaPrm.SetForceGCMark() - } - - res, err := s.metaBase.Inhume(ctx, metaPrm) - if err != nil { - if errors.Is(err, meta.ErrLockObjectRemoval) { - s.m.RUnlock() - return InhumeRes{}, ErrLockObjectRemoval - } - - s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, - zap.Error(err), - ) - - s.m.RUnlock() - - return InhumeRes{}, fmt.Errorf("metabase inhume: %w", err) - } - - s.m.RUnlock() - - s.decObjectCounterBy(logical, res.LogicInhumed()) - s.decObjectCounterBy(user, res.UserInhumed()) - s.decContainerObjectCounter(res.InhumedByCnrID()) - - i := 0 - for i < res.GetDeletionInfoLength() { - delInfo := res.GetDeletionInfoByIndex(i) - s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) - i++ - } - - if deletedLockObjs := res.DeletedLockObjects(); len(deletedLockObjs) != 0 { - s.deletedLockCallBack(ctx, deletedLockObjs) - } - - return InhumeRes{}, nil -} diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go deleted file mode 100644 index 1421f0e18..000000000 --- a/pkg/local_object_storage/shard/inhume_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package shard - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func TestShard_Inhume(t *testing.T) { - t.Parallel() - - t.Run("without write cache", func(t *testing.T) { - t.Parallel() - testShardInhume(t, false) - }) - - t.Run("with write cache", func(t *testing.T) { - t.Parallel() - testShardInhume(t, true) - }) -} - -func testShardInhume(t *testing.T, hasWriteCache bool) { - sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - - obj := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(obj, "foo", "bar") - - ts := testutil.GenerateObjectWithCID(cnr) - - var putPrm PutPrm - putPrm.SetObject(obj) - - var inhPrm InhumePrm - inhPrm.SetTarget(object.AddressOf(ts), object.AddressOf(obj)) - - var getPrm GetPrm - getPrm.SetAddress(object.AddressOf(obj)) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - _, err = sh.Get(context.Background(), getPrm) - require.NoError(t, err) - - _, err = sh.Inhume(context.Background(), inhPrm) - require.NoError(t, err) - - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectAlreadyRemoved(err)) -} diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go deleted file mode 100644 index af87981ca..000000000 --- a/pkg/local_object_storage/shard/list.go +++ /dev/null @@ -1,299 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - 
"go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// Cursor is a type for continuous object listing. -type Cursor = meta.Cursor - -// ErrEndOfListing is returned from object listing with cursor -// when storage can't return any more objects after provided -// cursor. Use nil cursor object to start listing again. -var ErrEndOfListing = meta.ErrEndOfListing - -type ListContainersPrm struct{} - -type ListContainersRes struct { - containers []cid.ID -} - -func (r ListContainersRes) Containers() []cid.ID { - return r.containers -} - -// IterateOverContainersPrm contains parameters for IterateOverContainers operation. -type IterateOverContainersPrm struct { - // Handler function executed upon containers in db. - Handler func(context.Context, objectSDK.Type, cid.ID) error -} - -// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. -type IterateOverObjectsInContainerPrm struct { - // ObjectType type of objects to iterate over. - ObjectType objectSDK.Type - // ContainerID container for objects to iterate over. - ContainerID cid.ID - // Handler function executed upon objects in db. - Handler func(context.Context, *objectcore.Info) error -} - -// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation. -type CountAliveObjectsInContainerPrm struct { - // ObjectType type of objects to iterate over. - ObjectType objectSDK.Type - // ContainerID container for objects to iterate over. - ContainerID cid.ID -} - -// ListWithCursorPrm contains parameters for ListWithCursor operation. -type ListWithCursorPrm struct { - count uint32 - cursor *Cursor -} - -// ListWithCursorRes contains values returned from ListWithCursor operation. -type ListWithCursorRes struct { - addrList []objectcore.Info - cursor *Cursor -} - -// WithCount sets maximum amount of addresses that ListWithCursor should return. -func (p *ListWithCursorPrm) WithCount(count uint32) { - p.count = count -} - -// WithCursor sets cursor for ListWithCursor operation. For initial request, -// ignore this param or use nil value. For consecutive requests, use value -// from ListWithCursorRes. -func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) { - p.cursor = cursor -} - -// AddressList returns addresses selected by ListWithCursor operation. -func (r ListWithCursorRes) AddressList() []objectcore.Info { - return r.addrList -} - -// Cursor returns cursor for consecutive listing requests. -func (r ListWithCursorRes) Cursor() *Cursor { - return r.cursor -} - -// List returns all objects physically stored in the Shard. 
-func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.List", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return SelectRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return SelectRes{}, err - } - defer release() - - lst, err := s.metaBase.Containers(ctx) - if err != nil { - return res, fmt.Errorf("list stored containers: %w", err) - } - - filters := objectSDK.NewSearchFilters() - filters.AddPhyFilter() - - for i := range lst { - var sPrm meta.SelectPrm - sPrm.SetContainerID(lst[i]) - sPrm.SetFilters(filters) - - sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase - if err != nil { - s.log.Debug(ctx, logs.ShardCantSelectAllObjects, - zap.Stringer("cid", lst[i]), - zap.Error(err)) - - continue - } - - res.addrList = append(res.addrList, sRes.AddressList()...) - } - - return res, nil -} - -func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListContainersRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ListContainers", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - )) - defer span.End() - - if s.GetMode().NoMetabase() { - return ListContainersRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ListContainersRes{}, err - } - defer release() - - containers, err := s.metaBase.Containers(ctx) - if err != nil { - return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) - } - - return ListContainersRes{ - containers: containers, - }, nil -} - -// ListWithCursor lists physical objects available in shard starting from -// cursor. Includes regular, tombstone and storage group objects. Does not -// include inhumed objects. Use cursor value from response for consecutive requests. -// -// Returns ErrEndOfListing if there are no more objects to return or the count -// parameter is set to zero. -func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) { - _, span := tracing.StartSpanFromContext(ctx, "shard.ListWithCursor", - trace.WithAttributes( - attribute.Int64("count", int64(prm.count)), - attribute.Bool("has_cursor", prm.cursor != nil), - )) - defer span.End() - - if s.GetMode().NoMetabase() { - return ListWithCursorRes{}, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ListWithCursorRes{}, err - } - defer release() - - var metaPrm meta.ListPrm - metaPrm.SetCount(prm.count) - metaPrm.SetCursor(prm.cursor) - res, err := s.metaBase.ListWithCursor(ctx, metaPrm) - if err != nil { - return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err) - } - - return ListWithCursorRes{ - addrList: res.AddressList(), - cursor: res.Cursor(), - }, nil -} - -// IterateOverContainers lists physical containers present in the shard.
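//
// An illustrative iteration (added for exposition; the handler body is a
// placeholder):
//
//	err := sh.IterateOverContainers(ctx, IterateOverContainersPrm{
//		Handler: func(ctx context.Context, typ objectSDK.Type, id cid.ID) error {
//			// inspect each container's type and ID here
//			return nil
//		},
//	})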
-func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { - _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers", - trace.WithAttributes( - attribute.Bool("has_handler", prm.Handler != nil), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - - var metaPrm meta.IterateOverContainersPrm - metaPrm.Handler = prm.Handler - err = s.metaBase.IterateOverContainers(ctx, metaPrm) - if err != nil { - return fmt.Errorf("iterate over containers: %w", err) - } - - return nil -} - -// IterateOverObjectsInContainer lists physical objects present in the shard for the provided container's bucket name. -func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { - _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer", - trace.WithAttributes( - attribute.Bool("has_handler", prm.Handler != nil), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - - var metaPrm meta.IterateOverObjectsInContainerPrm - metaPrm.ContainerID = prm.ContainerID - metaPrm.ObjectType = prm.ObjectType - metaPrm.Handler = prm.Handler - err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) - if err != nil { - return fmt.Errorf("iterate over objects: %w", err) - } - - return nil -} - -// CountAliveObjectsInContainer counts objects in the container's bucket which aren't in the graveyard or garbage. -func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { - _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket") - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return 0, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - - var metaPrm meta.CountAliveObjectsInContainerPrm - metaPrm.ObjectType = prm.ObjectType - metaPrm.ContainerID = prm.ContainerID - count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm) - if err != nil { - return 0, fmt.Errorf("count alive objects in bucket: %w", err) - } - - return count, nil -} diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go deleted file mode 100644 index 139b2e316..000000000 --- a/pkg/local_object_storage/shard/list_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package shard - -import ( - "context" - "sync" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestShard_List(t *testing.T) { - t.Parallel() - - t.Run("without write cache", func(t *testing.T) { - t.Parallel() - sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - testShardList(t, sh) - }) - - t.Run("with write cache", func(t *testing.T) { - t.Parallel() - shWC := newShard(t, true) - defer func() { require.NoError(t, shWC.Close(context.Background())) }() - testShardList(t, shWC) - }) -} - -func
testShardList(t *testing.T, sh *Shard) { - const C = 10 - const N = 5 - - var mtx sync.Mutex - objs := make(map[string]int) - - var errG errgroup.Group - errG.SetLimit(C * N) - for range C { - errG.Go(func() error { - cnr := cidtest.ID() - - for range N { - errG.Go(func() error { - obj := testutil.GenerateObjectWithCID(cnr) - testutil.AddPayload(obj, 1<<2) - - // add parent as virtual object, it must be ignored in List() - parent := testutil.GenerateObjectWithCID(cnr) - idParent, _ := parent.ID() - obj.SetParentID(idParent) - obj.SetParent(parent) - - mtx.Lock() - objs[object.AddressOf(obj).EncodeToString()] = 0 - mtx.Unlock() - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - return err - }) - } - return nil - }) - } - require.NoError(t, errG.Wait()) - - res, err := sh.List(context.Background()) - require.NoError(t, err) - - for _, objID := range res.AddressList() { - i, ok := objs[objID.EncodeToString()] - require.True(t, ok) - require.Equal(t, 0, i) - - objs[objID.EncodeToString()] = 1 - } -} diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go deleted file mode 100644 index 9c392fdac..000000000 --- a/pkg/local_object_storage/shard/lock.go +++ /dev/null @@ -1,109 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Lock marks objects as locked by another object. All objects must belong to the -// specified container. -// -// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject). -// -// Locked list should be unique. Panics if it is empty. -func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", idCnr.EncodeToString()), - attribute.String("locker", locker.EncodeToString()), - attribute.Int("locked_count", len(locked)), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - m := s.info.Mode - if m.ReadOnly() { - return ErrReadOnlyMode - } else if m.NoMetabase() { - return ErrDegradedMode - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - - err = s.metaBase.Lock(ctx, idCnr, locker, locked) - if err != nil { - return fmt.Errorf("metabase lock: %w", err) - } - - return nil -} - -// IsLocked checks object locking relation of the provided object. An object that is not found is -// considered not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
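//
// An illustrative check (added for exposition; see TestShard_IsLocked below):
//
//	locked, err := sh.IsLocked(ctx, objectcore.AddressOf(obj))
//	if err == nil && locked {
//		// the object is protected from inhume and removal
//	}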
-func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - m := s.GetMode() - if m.NoMetabase() { - return false, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } - defer release() - - var prm meta.IsLockedPrm - prm.SetAddress(addr) - - res, err := s.metaBase.IsLocked(ctx, prm) - if err != nil { - return false, err - } - - return res.Locked(), nil -} - -// GetLocks returns lock IDs of the provided object. An object that is not found is -// considered not locked. Requires healthy metabase, returns ErrDegradedMode otherwise. -func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - m := s.GetMode() - if m.NoMetabase() { - return nil, ErrDegradedMode - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - - return s.metaBase.GetLocks(ctx, addr) -} diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go deleted file mode 100644 index 3878a65cd..000000000 --- a/pkg/local_object_storage/shard/lock_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package shard - -import ( - "context" - "path/filepath" - "testing" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func TestShard_Lock(t *testing.T) { - t.Parallel() - - var sh *Shard - - rootPath := t.TempDir() - l := logger.NewLoggerWrapper(zap.NewNop()) - opts := []Option{ - WithID(NewIDFromBytes([]byte{})), - WithLogger(l), - WithBlobStorOptions( - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(2), - blobovniczatree.WithBlobovniczaShallowWidth(2)), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return len(data) <= 1<<20 - }, - }, - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(rootPath, "blob"))), - }, - }), - ), - WithMetaBaseOptions( - meta.WithPath(filepath.Join(rootPath, "meta")), -
meta.WithEpochState(epochState{}), - ), - WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(ctx, addresses) - }), - } - - sh = New(opts...) - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - obj := testutil.GenerateObjectWithCID(cnr) - objID, _ := obj.ID() - - lock := testutil.GenerateObjectWithCID(cnr) - lock.SetType(objectSDK.TypeLock) - lockID, _ := lock.ID() - - // put the object - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - // lock the object - - err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID}) - require.NoError(t, err) - - putPrm.SetObject(lock) - _, err = sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - t.Run("inhuming locked objects", func(t *testing.T) { - ts := testutil.GenerateObjectWithCID(cnr) - - var inhumePrm InhumePrm - inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(obj)) - - var objLockedErr *apistatus.ObjectLocked - - _, err = sh.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - _, err = sh.Inhume(context.Background(), inhumePrm) - require.ErrorAs(t, err, &objLockedErr) - }) - - t.Run("inhuming lock objects", func(t *testing.T) { - ts := testutil.GenerateObjectWithCID(cnr) - - var inhumePrm InhumePrm - inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(lock)) - - _, err = sh.Inhume(context.Background(), inhumePrm) - require.Error(t, err) - - inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock)) - _, err = sh.Inhume(context.Background(), inhumePrm) - require.Error(t, err) - }) - - t.Run("force objects inhuming", func(t *testing.T) { - var inhumePrm InhumePrm - inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock)) - inhumePrm.ForceRemoval() - - _, err = sh.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - // it should be possible to remove - // lock object now - - inhumePrm = InhumePrm{} - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - - _, err = sh.Inhume(context.Background(), inhumePrm) - require.NoError(t, err) - - // check that object has been removed - - var getPrm GetPrm - getPrm.SetAddress(objectcore.AddressOf(obj)) - - _, err = sh.Get(context.Background(), getPrm) - require.True(t, client.IsErrObjectNotFound(err)) - }) -} - -func TestShard_IsLocked(t *testing.T) { - sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - cnr := cidtest.ID() - obj := testutil.GenerateObjectWithCID(cnr) - cnrID, _ := obj.ContainerID() - objID, _ := obj.ID() - - lockID := oidtest.ID() - - // put the object - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - // not locked object is not locked - - locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj)) - require.NoError(t, err) - - require.False(t, locked) - - // locked object is locked - - require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID})) - - locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj)) - require.NoError(t, err) - - require.True(t, locked) -} diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go deleted file mode 100644 index 
087ba42ef..000000000 --- a/pkg/local_object_storage/shard/metrics.go +++ /dev/null @@ -1,60 +0,0 @@ -package shard - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - -// MetricsWriter is an interface that must store shard's metrics. -type MetricsWriter interface { - // SetObjectCounter must set object counter taking into account object type. - SetObjectCounter(objectType string, v uint64) - // AddToObjectCounter must update object counter taking into account object - // type. - // Negative parameter must decrease the counter. - AddToObjectCounter(objectType string, delta int) - // AddToContainerSize must add a value to the container size. - // Value can be negative. - AddToContainerSize(cnr string, value int64) - // AddToPayloadSize must add a value to the payload size. - // Value can be negative. - AddToPayloadSize(value int64) - // IncObjectCounter must increment shard's object counter taking into account - // object type. - IncObjectCounter(objectType string) - // SetShardID must set (update) the shard identifier that will be used in - // metrics. - SetShardID(id string) - // SetMode set mode of shard. - SetMode(mode mode.Mode) - // SetContainerObjectsCount sets container object count. - SetContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncContainerObjectsCount increments container object count. - IncContainerObjectsCount(cnrID string, objectType string) - // SubContainerObjectsCount subtracts container object count. - SubContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncRefillObjectsCount increments refill objects count. - IncRefillObjectsCount(path string, size int, success bool) - // SetRefillPercent sets refill percent. - SetRefillPercent(path string, percent uint32) - // SetRefillStatus sets refill status. 
- SetRefillStatus(path string, status string) - // SetEvacuationInProgress sets evacuation status - SetEvacuationInProgress(value bool) -} - -type noopMetrics struct{} - -var _ MetricsWriter = noopMetrics{} - -func (noopMetrics) SetObjectCounter(string, uint64) {} -func (noopMetrics) AddToObjectCounter(string, int) {} -func (noopMetrics) AddToContainerSize(string, int64) {} -func (noopMetrics) AddToPayloadSize(int64) {} -func (noopMetrics) IncObjectCounter(string) {} -func (noopMetrics) SetShardID(string) {} -func (noopMetrics) SetMode(mode.Mode) {} -func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {} -func (noopMetrics) IncContainerObjectsCount(string, string) {} -func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {} -func (noopMetrics) IncRefillObjectsCount(string, int, bool) {} -func (noopMetrics) SetRefillPercent(string, uint32) {} -func (noopMetrics) SetRefillStatus(string, string) {} -func (noopMetrics) SetEvacuationInProgress(bool) {} diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go deleted file mode 100644 index 5230dcad0..000000000 --- a/pkg/local_object_storage/shard/metrics_test.go +++ /dev/null @@ -1,433 +0,0 @@ -package shard - -import ( - "context" - "path/filepath" - "sync" - "testing" - "time" - - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -type metricsStore struct { - mtx sync.Mutex - objCounters map[string]uint64 - cnrSize map[string]int64 - cnrCount map[string]uint64 - pldSize int64 - mode mode.Mode - errCounter int64 - refillCount int64 - refillSize int64 - refillPercent uint32 - refillStatus string -} - -func newMetricStore() *metricsStore { - return &metricsStore{ - objCounters: map[string]uint64{ - "phy": 0, - "logic": 0, - }, - cnrSize: make(map[string]int64), - cnrCount: make(map[string]uint64), - } -} - -func (m *metricsStore) SetShardID(_ string) {} - -func (m *metricsStore) SetObjectCounter(objectType string, v uint64) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.objCounters[objectType] = v -} - -func (m *metricsStore) getObjectCounter(objectType string) uint64 { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.objCounters[objectType] -} - -func (m *metricsStore) containerSizes() map[string]int64 { - m.mtx.Lock() - defer m.mtx.Unlock() - - r := make(map[string]int64, len(m.cnrSize)) - for c, s := range m.cnrSize { - r[c] = s - } - return r -} - -func (m *metricsStore) payloadSize() int64 { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.pldSize -} - -func (m *metricsStore) AddToObjectCounter(objectType string, delta int) { - m.mtx.Lock() - defer m.mtx.Unlock() - switch { - case delta > 0: - m.objCounters[objectType] += uint64(delta) - 
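// negative deltas decrement the counter; the branch below clamps it at zero
- // so the stored value can never underflow
-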
case delta < 0:
- uDelta := uint64(-delta)
-
- if m.objCounters[objectType] >= uDelta {
- m.objCounters[objectType] -= uDelta
- } else {
- m.objCounters[objectType] = 0
- }
- case delta == 0:
- return
- }
-}
-
-func (m *metricsStore) IncObjectCounter(objectType string) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.objCounters[objectType] += 1
-}
-
-func (m *metricsStore) SetMode(mode mode.Mode) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.mode = mode
-}
-
-func (m *metricsStore) AddToContainerSize(cnr string, size int64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.cnrSize[cnr] += size
-}
-
-func (m *metricsStore) AddToPayloadSize(size int64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.pldSize += size
-}
-
-func (m *metricsStore) IncErrorCounter() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.errCounter += 1
-}
-
-func (m *metricsStore) ClearErrorCounter() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.errCounter = 0
-}
-
-func (m *metricsStore) DeleteShardMetrics() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.errCounter = 0
-}
-
-func (m *metricsStore) SetContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.cnrCount[cnrID+objectType] = value
-}
-
-func (m *metricsStore) IncContainerObjectsCount(cnrID string, objectType string) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.cnrCount[cnrID+objectType]++
-}
-
-func (m *metricsStore) SubContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- existed := m.cnrCount[cnrID+objectType]
- if existed < value {
- panic("existing value is smaller than the value to subtract")
- }
- if existed == value {
- delete(m.cnrCount, cnrID+objectType)
- } else {
- m.cnrCount[cnrID+objectType] -= value
- }
-}
-
-func (m *metricsStore) getContainerCount(cnrID, objectType string) (uint64, bool) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- v, ok := m.cnrCount[cnrID+objectType]
- return v, ok
-}
-
-func (m *metricsStore) IncRefillObjectsCount(_ string, size int, success bool) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillCount++
- m.refillSize += int64(size)
-}
-
-func (m *metricsStore) SetRefillPercent(_ string, percent uint32) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillPercent = percent
-}
-
-func (m *metricsStore) SetRefillStatus(_ string, status string) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillStatus = status
-}
-
-func (m *metricsStore) SetEvacuationInProgress(bool) {
-}
-
-func TestCounters(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
-
- sh.SetMode(context.Background(), mode.ReadOnly)
- require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(context.Background(), mode.ReadWrite)
- require.Equal(t, mode.ReadWrite, mm.mode)
-
- const objNumber = 10
- oo := make([]*objectSDK.Object, objNumber)
- for i := range objNumber {
- oo[i] = testutil.GenerateObject()
- }
-
- t.Run("defaults", func(t *testing.T) {
- require.Zero(t, mm.getObjectCounter(physical))
- require.Zero(t, mm.getObjectCounter(logical))
- require.Empty(t, mm.containerSizes())
- require.Zero(t, mm.payloadSize())
-
- for _, obj := range oo {
- contID, _ := obj.ContainerID()
- v, ok := mm.getContainerCount(contID.EncodeToString(), physical)
- require.Zero(t, v)
- require.False(t, ok)
- v, ok = mm.getContainerCount(contID.EncodeToString(), logical)
- require.Zero(t, v)
- require.False(t, ok)
- v, ok = mm.getContainerCount(contID.EncodeToString(), user)
- require.Zero(t, v)
-
require.False(t, ok) - } - }) - - var totalPayload int64 - - expectedLogicalSizes := make(map[string]int64) - expected := make(map[cid.ID]meta.ObjectCounters) - for i := range oo { - cnr, _ := oo[i].ContainerID() - oSize := int64(oo[i].PayloadSize()) - expectedLogicalSizes[cnr.EncodeToString()] += oSize - totalPayload += oSize - expected[cnr] = meta.ObjectCounters{ - Logic: 1, - Phy: 1, - User: 1, - } - } - - var prm PutPrm - - for i := range objNumber { - prm.SetObject(oo[i]) - - _, err := sh.Put(context.Background(), prm) - require.NoError(t, err) - } - - require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical)) - require.Equal(t, uint64(objNumber), mm.getObjectCounter(logical)) - require.Equal(t, uint64(objNumber), mm.getObjectCounter(user)) - require.Equal(t, expectedLogicalSizes, mm.containerSizes()) - require.Equal(t, totalPayload, mm.payloadSize()) - - cc, err := sh.metaBase.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: expected}, cc) - - t.Run("inhume_GC", func(t *testing.T) { - var prm InhumePrm - inhumedNumber := objNumber / 4 - - for i := range inhumedNumber { - prm.MarkAsGarbage(objectcore.AddressOf(oo[i])) - - _, err := sh.Inhume(context.Background(), prm) - require.NoError(t, err) - - cid, ok := oo[i].ContainerID() - require.True(t, ok) - expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize()) - - if v, ok := expected[cid]; ok { - v.Logic-- - v.User-- - if v.IsZero() { - delete(expected, cid) - } else { - expected[cid] = v - } - } - } - - require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical)) - require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(logical)) - require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(user)) - require.Equal(t, expectedLogicalSizes, mm.containerSizes()) - require.Equal(t, totalPayload, mm.payloadSize()) - - cc, err := sh.metaBase.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: expected}, cc) - - oo = oo[inhumedNumber:] - }) - - t.Run("inhume_TS", func(t *testing.T) { - var prm InhumePrm - - phy := mm.getObjectCounter(physical) - logic := mm.getObjectCounter(logical) - custom := mm.getObjectCounter(user) - - inhumedNumber := int(phy / 4) - for _, o := range addrFromObjs(oo[:inhumedNumber]) { - ts := oidtest.Address() - ts.SetContainer(o.Container()) - prm.SetTarget(ts, o) - _, err := sh.Inhume(context.Background(), prm) - require.NoError(t, err) - } - - for i := range inhumedNumber { - cid, ok := oo[i].ContainerID() - require.True(t, ok) - expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize()) - - if v, ok := expected[cid]; ok { - v.Logic-- - v.User-- - if v.IsZero() { - delete(expected, cid) - } else { - expected[cid] = v - } - } - } - - require.Equal(t, phy, mm.getObjectCounter(physical)) - require.Equal(t, logic-uint64(inhumedNumber), mm.getObjectCounter(logical)) - require.Equal(t, custom-uint64(inhumedNumber), mm.getObjectCounter(user)) - require.Equal(t, expectedLogicalSizes, mm.containerSizes()) - require.Equal(t, totalPayload, mm.payloadSize()) - - cc, err = sh.metaBase.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: expected}, cc) - - oo = oo[inhumedNumber:] - }) - - t.Run("Delete", func(t *testing.T) { - var prm DeletePrm - - phy := mm.getObjectCounter(physical) - logic := mm.getObjectCounter(logical) - custom := mm.getObjectCounter(user) - - 
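// remove a quarter of the remaining objects and check that the physical,
- // logical and user counters, container sizes and total payload size shrink
-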
deletedNumber := int(phy / 4) - prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...) - - _, err := sh.Delete(context.Background(), prm) - require.NoError(t, err) - - require.Equal(t, phy-uint64(deletedNumber), mm.getObjectCounter(physical)) - require.Equal(t, logic-uint64(deletedNumber), mm.getObjectCounter(logical)) - require.Equal(t, custom-uint64(deletedNumber), mm.getObjectCounter(user)) - var totalRemovedpayload uint64 - for i := range oo[:deletedNumber] { - removedPayload := oo[i].PayloadSize() - totalRemovedpayload += removedPayload - - cnr, _ := oo[i].ContainerID() - expectedLogicalSizes[cnr.EncodeToString()] -= int64(removedPayload) - - if v, ok := expected[cnr]; ok { - v.Logic-- - v.Phy-- - v.User-- - expected[cnr] = v - } - } - require.Equal(t, expectedLogicalSizes, mm.containerSizes()) - require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.payloadSize()) - - cc, err = sh.metaBase.ContainerCounters(context.Background()) - require.NoError(t, err) - require.Equal(t, meta.ContainerCounters{Counts: expected}, cc) - }) -} - -func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { - blobOpts := []blobstor.Option{ - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: fstree.New( - fstree.WithDirNameLen(2), - fstree.WithPath(filepath.Join(path, "blob")), - fstree.WithDepth(1)), - }, - }), - } - - mm := newMetricStore() - - sh := New( - WithID(NewIDFromBytes([]byte{})), - WithBlobStorOptions(blobOpts...), - WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))), - WithMetaBaseOptions( - meta.WithPath(filepath.Join(path, "meta")), - meta.WithEpochState(epochState{})), - WithMetricsWriter(mm), - WithGCRemoverSleepInterval(time.Hour), - ) - require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - - return sh, mm -} - -func addrFromObjs(oo []*objectSDK.Object) []oid.Address { - aa := make([]oid.Address, len(oo)) - - for i := range len(oo) { - aa[i] = objectcore.AddressOf(oo[i]) - } - - return aa -} diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go deleted file mode 100644 index 901528976..000000000 --- a/pkg/local_object_storage/shard/mode.go +++ /dev/null @@ -1,85 +0,0 @@ -package shard - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "go.uber.org/zap" -) - -// ErrReadOnlyMode is returned when it is impossible to apply operation -// that changes shard's memory due to the "read-only" shard's mode. -var ErrReadOnlyMode = logicerr.New("shard is in read-only mode") - -// ErrDegradedMode is returned when operation requiring metabase is executed in degraded mode. -var ErrDegradedMode = logicerr.New("shard is in degraded mode") - -// SetMode sets mode of the shard. -// -// Returns any error encountered that did not allow -// setting shard mode. 
-func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error { - unlock := s.lockExclusive() - defer unlock() - - return s.setMode(ctx, m) -} - -func (s *Shard) setMode(ctx context.Context, m mode.Mode) error { - s.log.Info(ctx, logs.ShardSettingShardMode, - zap.Stringer("old_mode", s.info.Mode), - zap.Stringer("new_mode", m)) - - components := []interface { - SetMode(context.Context, mode.Mode) error - }{ - s.metaBase, s.blobStor, - } - - if s.hasWriteCache() { - components = append(components, s.writeCache) - } - - if s.pilorama != nil { - components = append(components, s.pilorama) - } - - // The usual flow of the requests (pilorama is independent): - // writecache -> blobstor -> metabase - // For mode.ReadOnly and mode.Degraded the order is: - // writecache -> blobstor -> metabase - // For mode.ReadWrite it is the opposite: - // metabase -> blobstor -> writecache - if m != mode.ReadWrite { - if s.hasWriteCache() { - components[0], components[2] = components[2], components[0] - } else { - components[0], components[1] = components[1], components[0] - } - } - - if !m.Disabled() { - for i := range components { - if err := components[i].SetMode(ctx, m); err != nil { - return err - } - } - } - - s.info.Mode = m - s.metricsWriter.SetMode(s.info.Mode) - - s.log.Info(ctx, logs.ShardShardModeSetSuccessfully, - zap.Stringer("mode", s.info.Mode)) - return nil -} - -// GetMode returns mode of the shard. -func (s *Shard) GetMode() mode.Mode { - s.m.RLock() - defer s.m.RUnlock() - - return s.info.Mode -} diff --git a/pkg/local_object_storage/shard/mode/mode.go b/pkg/local_object_storage/shard/mode/mode.go deleted file mode 100644 index dc4d52b0e..000000000 --- a/pkg/local_object_storage/shard/mode/mode.go +++ /dev/null @@ -1,123 +0,0 @@ -package mode - -import "math" - -// Mode represents enumeration of Shard work modes. -type Mode uint32 - -const ( - // ReadWrite is a Mode value for shard that is available - // for read and write operations. Default shard mode. - ReadWrite Mode = 0b000 - - // ReadOnly is a Mode value for shard that does not - // accept write operation but is readable. - ReadOnly Mode = 0b001 - - // Degraded is a Mode value for shard when the metabase is unavailable. - // It is hard to perform some modifying operations in this mode, thus it can only be set by an administrator. - Degraded Mode = 0b010 - - // Disabled mode is a mode where a shard is disabled. - // An existing shard can't have this mode, but it can be used in - // the configuration or control service commands. - Disabled Mode = math.MaxUint32 - - // DegradedReadOnly is a Mode value for shard that is set automatically - // after a certain number of errors is encountered. It is the same as - // `mode.Degraded` but also is read-only. - DegradedReadOnly Mode = Degraded | ReadOnly -) - -// ComponentMode represents basic operation modes for shared components, including READ, READ_WRITE, and DISABLED. -type ComponentMode uint32 - -const ( - // ComponentReadWrite is a Mode value for component that is available - // for read and write operations. Default component mode. - ComponentReadWrite ComponentMode = 0 - - // ComponentReadOnly is a Mode value for component that does not - // accept write operation but is readable. - ComponentReadOnly ComponentMode = 0b001 - - // ComponentDisabled mode is a mode where a component is disabled. 
- ComponentDisabled ComponentMode = math.MaxUint32 -) - -func (m Mode) String() string { - switch m { - default: - return "UNDEFINED" - case ReadWrite: - return "READ_WRITE" - case ReadOnly: - return "READ_ONLY" - case Degraded: - return "DEGRADED_READ_WRITE" - case DegradedReadOnly: - return "DEGRADED_READ_ONLY" - case Disabled: - return "DISABLED" - } -} - -func (m ComponentMode) String() string { - switch m { - default: - return "UNDEFINED" - case ComponentReadWrite: - return "READ_WRITE" - case ComponentReadOnly: - return "READ_ONLY" - case ComponentDisabled: - return "CLOSED" - } -} - -// NoMetabase returns true iff m is operating without the metabase. -func (m Mode) NoMetabase() bool { - return m&Degraded != 0 -} - -// ReadOnly returns true iff m prohibits modifying operations with shard. -func (m Mode) ReadOnly() bool { - return m&ReadOnly != 0 -} - -// ReadOnly returns true iff m prohibits modifying operations with shard. -func (m ComponentMode) ReadOnly() bool { - return m&ComponentReadOnly != 0 -} - -func (m Mode) Disabled() bool { - return m == Disabled -} - -func (m ComponentMode) Disabled() bool { - return m == ComponentDisabled -} - -// ConvertToComponentModeDegraded converts a ShardMode to a corresponding ComponentMode. -// Disables the component if the node is in degraded mode. Used in Metabase, Writecache, Pilorama. -func ConvertToComponentModeDegraded(m Mode) ComponentMode { - if m.NoMetabase() || m.Disabled() { - return ComponentDisabled - } - if m.ReadOnly() { - return ComponentReadOnly - } - return ComponentReadWrite -} - -// ConvertToComponentMode converts a ShardMode to a corresponding ComponentMode. -// Ignores the degraded mode of the node. Used in Blobstore. -func ConvertToComponentMode(m Mode) ComponentMode { - if m.Disabled() { - return ComponentDisabled - } - if m.ReadOnly() { - return ComponentReadOnly - } - return ComponentReadWrite -} diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go deleted file mode 100644 index f8cb00a31..000000000 --- a/pkg/local_object_storage/shard/put.go +++ /dev/null @@ -1,114 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - obj *objectSDK.Object - indexAttributes bool -} - -// PutRes groups the resulting values of Put operation. -type PutRes struct{} - -// SetObject is a Put option to set object to save. -func (p *PutPrm) SetObject(obj *objectSDK.Object) { - p.obj = obj -} - -func (p *PutPrm) SetIndexAttributes(v bool) { - p.indexAttributes = v -} - -// Put saves the object in shard. -// -// Returns any error encountered that -// did not allow to completely save the object. -// -// Returns ErrReadOnlyMode error if shard is in "read-only" mode. 
-func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- m := s.info.Mode
- if m.ReadOnly() {
- return PutRes{}, ErrReadOnlyMode
- }
-
- data, err := prm.obj.Marshal()
- if err != nil {
- return PutRes{}, fmt.Errorf("cannot marshal object: %w", err)
- }
-
- var putPrm common.PutPrm // form Put parameters
- putPrm.Object = prm.obj
- putPrm.RawData = data
- putPrm.Address = objectCore.AddressOf(prm.obj)
-
- var res common.PutRes
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return PutRes{}, err
- }
- defer release()
-
- // existence checks are not performed here; they should be executed
- // ahead of `Put` by the storage engine
- tryCache := s.hasWriteCache() && !m.NoMetabase()
- if tryCache {
- res, err = s.writeCache.Put(ctx, putPrm)
- }
- if err != nil || !tryCache {
- if err != nil {
- s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.Error(err))
- }
-
- res, err = s.blobStor.Put(ctx, putPrm)
- if err != nil {
- return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
- }
- }
-
- if !m.NoMetabase() {
- var pPrm meta.PutPrm
- pPrm.SetObject(prm.obj)
- pPrm.SetStorageID(res.StorageID)
- pPrm.SetIndexAttributes(prm.indexAttributes)
- res, err := s.metaBase.Put(ctx, pPrm)
- if err != nil {
- // maybe this case needs special handling,
- // since the object has already been successfully written to the BlobStor
- return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
- }
-
- if res.Inserted {
- s.incObjectCounter(putPrm.Address.Container(), meta.IsUserObject(prm.obj))
- s.addToPayloadSize(int64(prm.obj.PayloadSize()))
- s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize()))
- }
- }
-
- return PutRes{}, nil
-}
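-
-// Illustrative usage sketch (not from the original file; `sh`, `ctx` and
-// `obj` are assumed to be in scope):
-//
-//	var prm PutPrm
-//	prm.SetObject(obj)
-//	if _, err := sh.Put(ctx, prm); err != nil {
-//		// e.g. ErrReadOnlyMode when the shard is read-only
-//	}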
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
deleted file mode 100644
index 443689104..000000000
--- a/pkg/local_object_storage/shard/range.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package shard
-
-import (
- "context"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// RngPrm groups the parameters of GetRange operation.
-type RngPrm struct {
- ln uint64
-
- off uint64
-
- addr oid.Address
-
- skipMeta bool
-}
-
-// RngRes groups the resulting values of GetRange operation.
-type RngRes struct {
- obj *objectSDK.Object
- hasMeta bool
-}
-
-// SetAddress is a Rng option to set the address of the requested object.
-//
-// Option is required.
-func (p *RngPrm) SetAddress(addr oid.Address) {
- p.addr = addr
-}
-
-// SetRange is a GetRange option to set range of requested payload data.
-func (p *RngPrm) SetRange(off uint64, ln uint64) {
- p.off, p.ln = off, ln
-}
-
-// SetIgnoreMeta is a GetRange option to try to fetch the object from the blobstor
-// directly, without accessing the metabase.
-func (p *RngPrm) SetIgnoreMeta(ignore bool) {
- p.skipMeta = ignore
-}
-
-// Object returns the requested object part.
-//
-// Instance payload contains the requested range of the original object.
-func (r RngRes) Object() *objectSDK.Object {
- return r.obj
-}
-
-// HasMeta returns true if info about the object was found in the metabase.
-func (r RngRes) HasMeta() bool {
- return r.hasMeta
-}
-
-// GetRange reads part of an object from shard.
-//
-// Returns any error encountered that
-// did not allow to completely read the object part.
-//
-// Returns ErrRangeOutOfBounds if the requested object range is out of bounds.
-// Returns an error of type apistatus.ObjectNotFound if the requested object is missing.
-// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
-// Returns the objectSDK.ErrObjectIsExpired if the object is present but already expired.
-// Returns the ErrShardDisabled if the shard is disabled.
-func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetRange",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("address", prm.addr.EncodeToString()),
- attribute.Bool("skip_meta", prm.skipMeta),
- attribute.String("offset", strconv.FormatUint(prm.off, 10)),
- attribute.String("length", strconv.FormatUint(prm.ln, 10)),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.EvacuationInProgress {
- return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
- if s.info.Mode.Disabled() {
- return RngRes{}, ErrShardDisabled
- }
-
- cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
- var getRngPrm common.GetRangePrm
- getRngPrm.Address = prm.addr
- getRngPrm.Range.SetOffset(prm.off)
- getRngPrm.Range.SetLength(prm.ln)
- getRngPrm.StorageID = id
-
- res, err := stor.GetRange(ctx, getRngPrm)
- if err != nil {
- return nil, err
- }
-
- obj := objectSDK.New()
- obj.SetPayload(res.Data)
-
- return obj, nil
- }
-
- wc := func(c writecache.Cache) (*objectSDK.Object, error) {
- res, err := c.Get(ctx, prm.addr)
- if err != nil {
- return nil, err
- }
-
- payload := res.Payload()
- from := prm.off
- to := from + prm.ln
- if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return nil, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
- }
-
- obj := objectSDK.New()
- obj.SetPayload(payload[from:to])
- return obj, nil
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return RngRes{}, err
- }
- defer release()
-
- skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
- obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
-
- return RngRes{
- obj: obj,
- hasMeta: hasMeta,
- }, err
-}
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
deleted file mode 100644
index 06fe9f511..000000000
--- a/pkg/local_object_storage/shard/range_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package shard
-
-import (
- "bytes"
- "context"
- "math"
- "path/filepath"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestShard_GetRange(t *testing.T) { - t.Parallel() - t.Run("without write cache", func(t *testing.T) { - t.Parallel() - testShardGetRange(t, false) - }) - - t.Run("with write cache", func(t *testing.T) { - t.Parallel() - testShardGetRange(t, true) - }) -} - -func testShardGetRange(t *testing.T, hasWriteCache bool) { - type testCase struct { - hasErr bool - name string - payloadSize int - rng *objectSDK.Range - } - - const ( - writeCacheMaxSize = 1024 - smallObjectSize = 2048 - ) - - newRange := func(off, ln uint64) *objectSDK.Range { - rng := objectSDK.NewRange() - rng.SetOffset(off) - rng.SetLength(ln) - return rng - } - - testCases := []testCase{ - {false, "small object, good", 1024, newRange(11, 123)}, - {true, "small object, out of range, big len", 1024, newRange(10, 1020)}, - {true, "small object, out of range, big offset", 1024, newRange(1025, math.MaxUint64-10)}, - {false, "big object, good", 2048, newRange(11, 123)}, - {true, "big object, out of range, big len", 2048, newRange(100, 2000)}, - {true, "big object, out of range, big offset", 2048, newRange(2048, math.MaxUint64-10)}, - } - - if hasWriteCache { - testCases = append(testCases, - testCase{false, "object in write-cache, good", 100, newRange(2, 18)}, - testCase{true, "object in write-cache, out of range, big len", 100, newRange(4, 99)}, - testCase{true, "object in write-cache, out of range, big offset", 100, newRange(101, math.MaxUint64-10)}) - } - - wcOpts := []writecache.Option{ - writecache.WithMaxObjectSize(writeCacheMaxSize), - } - - sh := newCustomShard(t, hasWriteCache, shardOptions{ - wcOpts: wcOpts, - bsOpts: []blobstor.Option{ - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), - blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(1), - blobovniczatree.WithBlobovniczaShallowWidth(1)), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return len(data) <= smallObjectSize - }, - }, - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(t.TempDir(), "blob"))), - }, - }), - }, - }) - defer func() { require.NoError(t, sh.Close(context.Background())) }() - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - testutil.AddPayload(obj, tc.payloadSize) - - addr := object.AddressOf(obj) - payload := bytes.Clone(obj.Payload()) - - var putPrm PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(t, err) - - var rngPrm RngPrm - rngPrm.SetAddress(addr) - rngPrm.SetRange(tc.rng.GetOffset(), tc.rng.GetLength()) - - res, err := sh.GetRange(context.Background(), rngPrm) - if tc.hasErr { - var target 
*apistatus.ObjectOutOfRange - require.ErrorAs(t, err, &target) - } else { - require.Equal(t, - payload[tc.rng.GetOffset():tc.rng.GetOffset()+tc.rng.GetLength()], - res.Object().Payload()) - } - }) - } -} diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go deleted file mode 100644 index 20f1f2b6f..000000000 --- a/pkg/local_object_storage/shard/rebuild.go +++ /dev/null @@ -1,193 +0,0 @@ -package shard - -import ( - "context" - "errors" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var ErrRebuildInProgress = errors.New("shard rebuild in progress") - -type rebuildTask struct { - concurrencyLimiter common.RebuildLimiter - fillPercent int -} - -type rebuilder struct { - mtx *sync.Mutex - wg *sync.WaitGroup - cancel func() - done chan struct{} - tasks chan rebuildTask -} - -func newRebuilder() *rebuilder { - return &rebuilder{ - mtx: &sync.Mutex{}, - wg: &sync.WaitGroup{}, - tasks: make(chan rebuildTask), - } -} - -func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) { - r.mtx.Lock() - defer r.mtx.Unlock() - - if r.done != nil { - return // already started - } - ctx, cancel := context.WithCancel(ctx) - r.cancel = cancel - r.done = make(chan struct{}) - r.wg.Add(1) - go func() { - defer r.wg.Done() - for { - select { - case <-r.done: - return - case t, ok := <-r.tasks: - if !ok { - continue - } - runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) - } - } - }() -} - -func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, - fillPercent int, concLimiter common.RebuildLimiter, -) { - select { - case <-ctx.Done(): - return - default: - } - log.Info(ctx, logs.BlobstoreRebuildStarted) - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { - log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) - } else { - log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) - } -} - -func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, -) error { - select { - case <-ctx.Done(): - return ctx.Err() - case r.tasks <- rebuildTask{ - concurrencyLimiter: limiter, - fillPercent: fillPercent, - }: - return nil - default: - return ErrRebuildInProgress - } -} - -func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) { - r.mtx.Lock() - defer r.mtx.Unlock() - - if r.done != nil { - close(r.done) - } - if r.cancel != nil { - r.cancel() - } - r.wg.Wait() - r.cancel = nil - r.done = nil - log.Info(ctx, logs.BlobstoreRebuildStopped) -} - -var errMBIsNotAvailable = errors.New("metabase is not available") - -type mbStorageIDUpdate struct { - mb *meta.DB -} - -func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr 
oid.Address, storageID []byte) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if u.mb == nil { - return errMBIsNotAvailable - } - - var prm meta.UpdateStorageIDPrm - prm.SetAddress(addr) - prm.SetStorageID(storageID) - _, err := u.mb.UpdateStorageID(ctx, prm) - return err -} - -type RebuildPrm struct { - ConcurrencyLimiter common.ConcurrencyLimiter - TargetFillPercent uint32 -} - -func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)), - )) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - - limiter := &rebuildLimiter{ - concurrencyLimiter: p.ConcurrencyLimiter, - rateLimiter: s.opsLimiter, - } - return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) -} - -var _ common.RebuildLimiter = (*rebuildLimiter)(nil) - -type rebuildLimiter struct { - concurrencyLimiter common.ConcurrencyLimiter - rateLimiter qos.Limiter -} - -func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { - return r.concurrencyLimiter.AcquireWorkSlot(ctx) -} - -func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { - release, err := r.rateLimiter.ReadRequest(ctx) - return common.ReleaseFunc(release), err -} - -func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { - release, err := r.rateLimiter.WriteRequest(ctx) - return common.ReleaseFunc(release), err -} diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go deleted file mode 100644 index d90343265..000000000 --- a/pkg/local_object_storage/shard/refill_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package shard - -import ( - "context" - "os" - "testing" - - shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -func BenchmarkRefillMetabase(b *testing.B) { - b.Run("100 objects", func(b *testing.B) { - benchRefillMetabase(b, 100) - }) - - b.Run("1000 objects", func(b *testing.B) { - benchRefillMetabase(b, 1000) - }) - - b.Run("2000 objects", func(b *testing.B) { - benchRefillMetabase(b, 2000) - }) - - b.Run("5000 objects", func(b *testing.B) { - benchRefillMetabase(b, 5000) - }) -} - -func benchRefillMetabase(b *testing.B, objectsCount int) { - sh := newCustomShard(b, false, shardOptions{ - additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)}, - }) - - defer func() { require.NoError(b, sh.Close(context.Background())) }() - - var putPrm PutPrm - - for range objectsCount / 2 { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj - - putPrm.SetObject(obj) - - _, err := sh.Put(context.Background(), putPrm) - require.NoError(b, err) - } - - for range objectsCount / 2 { - obj := testutil.GenerateObject() - testutil.AddAttribute(obj, "foo", "bar") - obj.SetID(oidtest.ID()) - testutil.AddPayload(obj, 1<<20) // fstree obj - - putPrm.SetObject(obj) - - 
_, err := sh.Put(context.Background(), putPrm) - require.NoError(b, err) - } - - require.NoError(b, sh.Close(context.Background())) - require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path)) - - require.NoError(b, sh.Open(context.Background())) - sh.cfg.refillMetabase = true - - b.ReportAllocs() - b.ResetTimer() - - require.NoError(b, sh.Init(context.Background())) - - require.NoError(b, sh.Close(context.Background())) -} diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go deleted file mode 100644 index e563f390b..000000000 --- a/pkg/local_object_storage/shard/reload_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package shard - -import ( - "context" - "os" - "path/filepath" - "testing" - - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/stretchr/testify/require" -) - -func TestShardReload(t *testing.T) { - t.Parallel() - - p := t.Name() - defer os.RemoveAll(p) - l := test.NewLogger(t) - blobOpts := []blobstor.Option{ - blobstor.WithLogger(l), - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(p, "blob")), - fstree.WithDepth(1)), - }, - }), - } - - metaOpts := []meta.Option{ - meta.WithPath(filepath.Join(p, "meta")), - meta.WithEpochState(epochState{}), - } - - opts := []Option{ - WithID(NewIDFromBytes([]byte{})), - WithLogger(l), - WithBlobStorOptions(blobOpts...), - WithMetaBaseOptions(metaOpts...), - WithPiloramaOptions( - pilorama.WithPath(filepath.Join(p, "pilorama"))), - WithMetricsWriter(newMetricStore()), - } - - sh := New(opts...) 
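- // two-phase startup: open the shard's components first, then initialize them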
- require.NoError(t, sh.Open(context.Background())) - require.NoError(t, sh.Init(context.Background())) - - defer func() { - require.NoError(t, sh.Close(context.Background())) - }() - - objects := make([]objAddr, 5) - for i := range objects { - objects[i].obj = newObject() - objects[i].addr = objectCore.AddressOf(objects[i].obj) - require.NoError(t, putObject(sh, objects[i].obj)) - } - - checkHasObjects := func(t *testing.T, exists bool) { - for i := range objects { - var prm ExistsPrm - prm.Address = objects[i].addr - - res, err := sh.Exists(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, exists, res.Exists(), "object #%d is missing", i) - } - } - - checkHasObjects(t, true) - - t.Run("same config, no-op", func(t *testing.T) { - require.NoError(t, sh.Reload(context.Background(), opts...)) - checkHasObjects(t, true) - }) - - t.Run("open meta at new path", func(t *testing.T) { - newShardOpts := func(metaPath string, resync bool) []Option { - metaOpts := []meta.Option{meta.WithPath(metaPath), meta.WithEpochState(epochState{})} - return append(opts, WithMetaBaseOptions(metaOpts...), WithRefillMetabase(resync)) - } - - newOpts := newShardOpts(filepath.Join(p, "meta1"), false) - require.NoError(t, sh.Reload(context.Background(), newOpts...)) - - checkHasObjects(t, false) // new path, but no resync - - t.Run("can put objects", func(t *testing.T) { - obj := newObject() - require.NoError(t, putObject(sh, obj)) - objects = append(objects, objAddr{obj: obj, addr: objectCore.AddressOf(obj)}) - }) - - newOpts = newShardOpts(filepath.Join(p, "meta2"), true) - require.NoError(t, sh.Reload(context.Background(), newOpts...)) - - checkHasObjects(t, true) // all objects are restored, including the new one - - t.Run("reload failed", func(t *testing.T) { - badPath := filepath.Join(p, "meta3") - require.NoError(t, os.WriteFile(badPath, []byte{1}, 0)) - - newOpts = newShardOpts(badPath, true) - require.Error(t, sh.Reload(context.Background(), newOpts...)) - - // Cleanup is done, no panic. - obj := newObject() - require.ErrorIs(t, putObject(sh, obj), ErrReadOnlyMode) - - // Old objects are still accessible. - checkHasObjects(t, true) - - // Successive reload produces no undesired effects. 
- require.NoError(t, os.RemoveAll(badPath)) - require.NoError(t, sh.Reload(context.Background(), newOpts...)) - - obj = newObject() - require.NoError(t, putObject(sh, obj)) - - objects = append(objects, objAddr{obj: obj, addr: objectCore.AddressOf(obj)}) - checkHasObjects(t, true) - }) - }) -} - -func putObject(sh *Shard, obj *objectSDK.Object) error { - var prm PutPrm - prm.SetObject(obj) - - _, err := sh.Put(context.Background(), prm) - return err -} - -func newObject() *objectSDK.Object { - x := objectSDK.New() - ver := version.Current() - - x.SetID(oidtest.ID()) - x.SetSessionToken(sessiontest.Object()) - x.SetPayload([]byte{1, 2, 3}) - x.SetPayloadSize(3) - x.SetOwnerID(usertest.ID()) - x.SetContainerID(cidtest.ID()) - x.SetType(objectSDK.TypeRegular) - x.SetVersion(&ver) - x.SetPayloadChecksum(checksumtest.Checksum()) - x.SetPayloadHomomorphicHash(checksumtest.Checksum()) - return x -} diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go deleted file mode 100644 index fbc751e26..000000000 --- a/pkg/local_object_storage/shard/select.go +++ /dev/null @@ -1,82 +0,0 @@ -package shard - -import ( - "context" - "fmt" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// SelectPrm groups the parameters of Select operation. -type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters - isIndexedContainer bool -} - -// SelectRes groups the resulting values of Select operation. -type SelectRes struct { - addrList []oid.Address -} - -// SetContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) { - p.cnr = cnr - p.isIndexedContainer = isIndexedContainer -} - -// SetFilters is a Select option to set the object filters. -func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) { - p.filters = fs -} - -// AddressList returns list of addresses of the selected objects. -func (r SelectRes) AddressList() []oid.Address { - return r.addrList -} - -// Select selects the objects from shard that match select parameters. -// -// Returns any error encountered that -// did not allow to completely select the objects. 
-func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Select",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", prm.cnr.EncodeToString()),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return SelectRes{}, ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, err
- }
- defer release()
-
- var selectPrm meta.SelectPrm
- selectPrm.SetFilters(prm.filters)
- selectPrm.SetContainerID(prm.cnr)
- selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
-
- mRes, err := s.metaBase.Select(ctx, selectPrm)
- if err != nil {
- return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
- }
-
- return SelectRes{
- addrList: mRes.AddressList(),
- }, nil
-}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
deleted file mode 100644
index f21541d9d..000000000
--- a/pkg/local_object_storage/shard/shard.go
+++ /dev/null
@@ -1,562 +0,0 @@
-package shard
-
-import (
- "context"
- "sync"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-// Shard represents a single shard of the FrostFS Local Storage Engine.
-type Shard struct {
- *cfg
-
- gc *gc
-
- writeCache writecache.Cache
-
- blobStor *blobstor.BlobStor
-
- pilorama pilorama.ForestStorage
-
- metaBase *meta.DB
-
- tsSource TombstoneSource
-
- rb *rebuilder
-
- opsLimiter *atomicOpsLimiter
-
- gcCancel atomic.Value
- setModeRequested atomic.Bool
- writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
-}
-
-// Option represents Shard's constructor option.
-type Option func(*cfg)
-
-// ExpiredTombstonesCallback is a callback handling list of expired tombstones.
-type ExpiredTombstonesCallback func(context.Context, []meta.TombstonedObject)
-
-// ExpiredObjectsCallback is a callback handling list of expired objects.
-type ExpiredObjectsCallback func(context.Context, uint64, []oid.Address)
-
-// DeletedLockCallback is a callback handling list of deleted LOCK objects.
-type DeletedLockCallback func(context.Context, []oid.Address)
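-
-// These callbacks are wired up by the storage engine. An illustrative hookup,
-// mirroring the one in lock_test.go (`sh` is an assumed *Shard in scope):
-//
-//	WithDeletedLockCallback(func(ctx context.Context, addrs []oid.Address) {
-//		sh.HandleDeletedLocks(ctx, addrs)
-//	})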
-
-// EmptyContainersCallback is a callback handling a list of zero-size and zero-count containers.
-type EmptyContainersCallback func(context.Context, []cid.ID)
-
-type cfg struct {
- m sync.RWMutex
-
- refillMetabase bool
- refillMetabaseWorkersCount int
-
- rmBatchSize int
-
- useWriteCache bool
-
- info Info
-
- blobOpts []blobstor.Option
-
- metaOpts []meta.Option
-
- writeCacheOpts []writecache.Option
-
- piloramaOpts []pilorama.Option
-
- log *logger.Logger
-
- gcCfg gcCfg
-
- expiredTombstonesCallback ExpiredTombstonesCallback
-
- expiredLocksCallback ExpiredObjectsCallback
-
- deletedLockCallBack DeletedLockCallback
-
- zeroSizeContainersCallback EmptyContainersCallback
- zeroCountContainersCallback EmptyContainersCallback
-
- tsSource TombstoneSource
-
- metricsWriter MetricsWriter
-
- reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
-
- containerInfo container.InfoProvider
-
- configOpsLimiter qos.Limiter
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- rmBatchSize: 100,
- log: logger.NewLoggerWrapper(zap.L()),
- gcCfg: defaultGCCfg(),
- reportErrorFunc: func(context.Context, string, string, error) {},
- zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
- zeroCountContainersCallback: func(context.Context, []cid.ID) {},
- metricsWriter: noopMetrics{},
- configOpsLimiter: qos.NewNoopLimiter(),
- }
-}
-
-// New creates, initializes and returns a new Shard instance.
-func New(opts ...Option) *Shard {
- c := defaultCfg()
-
- for i := range opts {
- opts[i](c)
- }
-
- bs := blobstor.New(c.blobOpts...)
- mb := meta.New(c.metaOpts...)
-
- s := &Shard{
- cfg: c,
- blobStor: bs,
- metaBase: mb,
- tsSource: c.tsSource,
- opsLimiter: newAtomicOpsLimiter(c.configOpsLimiter),
- }
-
- reportFunc := func(ctx context.Context, msg string, err error) {
- s.reportErrorFunc(ctx, s.ID().String(), msg, err)
- }
-
- s.blobStor.SetReportErrorFunc(reportFunc)
-
- if c.useWriteCache {
- s.writeCache = writecache.New(
- append(c.writeCacheOpts,
- writecache.WithReportErrorFunc(reportFunc),
- writecache.WithBlobstor(bs),
- writecache.WithMetabase(mb),
- writecache.WithQoSLimiter(s.opsLimiter))...)
- s.writeCache.GetMetrics().SetPath(s.writeCache.DumpInfo().Path)
- }
-
- if s.piloramaOpts != nil {
- s.pilorama = pilorama.NewBoltForest(c.piloramaOpts...)
- }
-
- s.fillInfo()
- s.writecacheSealCancel.Store(notInitializedCancel)
-
- return s
-}
-
-// WithID returns option to set the default shard identifier.
-func WithID(id *ID) Option {
- return func(c *cfg) {
- c.info.ID = id
- }
-}
-
-// WithBlobStorOptions returns option to set internal BlobStor options.
-func WithBlobStorOptions(opts ...blobstor.Option) Option {
- return func(c *cfg) {
- c.blobOpts = opts
- }
-}
-
-// WithMetaBaseOptions returns option to set internal metabase options.
-func WithMetaBaseOptions(opts ...meta.Option) Option {
- return func(c *cfg) {
- c.metaOpts = opts
- }
-}
-
-// WithWriteCacheOptions returns option to set internal write cache options.
-func WithWriteCacheOptions(opts []writecache.Option) Option {
- return func(c *cfg) {
- c.writeCacheOpts = opts
- }
-}
-
-// WithWriteCacheMetrics returns an option to set the metrics register used by the write cache.
-func WithWriteCacheMetrics(wcMetrics writecache.Metrics) Option {
- return func(c *cfg) {
- c.writeCacheOpts = append(c.writeCacheOpts, writecache.WithMetrics(wcMetrics))
- }
-}
-
-// WithPiloramaOptions returns option to set internal pilorama options.
-func WithPiloramaOptions(opts ...pilorama.Option) Option {
- return func(c *cfg) {
- c.piloramaOpts = opts
- }
-}
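-
-// Illustrative construction sketch (the option values are assumptions,
-// modeled on the tests in this package):
-//
-//	sh := New(
-//		WithID(NewIDFromBytes([]byte{})),
-//		WithBlobStorOptions(blobOpts...),
-//		WithMetaBaseOptions(metaOpts...),
-//	)
-//	// the shard must then be opened (Open) and initialized (Init) before use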
-
-// WithLogger returns option to set Shard's logger.
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- c.gcCfg.log = l.WithTag(logger.TagGC)
- }
-}
-
-// WithWriteCache returns option to toggle write cache usage.
-func WithWriteCache(use bool) Option {
- return func(c *cfg) {
- c.useWriteCache = use
- }
-}
-
-// hasWriteCache reports whether the write cache is enabled on the shard.
-func (s *Shard) hasWriteCache() bool {
- return s.useWriteCache
-}
-
-// NeedRefillMetabase returns true if the metabase needs to be refilled.
-func (s *Shard) NeedRefillMetabase() bool {
- return s.cfg.refillMetabase
-}
-
-// WithRemoverBatchSize returns option to set batch size
-// of single removal operation.
-func WithRemoverBatchSize(sz int) Option {
- return func(c *cfg) {
- c.rmBatchSize = sz
- }
-}
-
-// WithGCWorkerPoolInitializer returns option to set initializer of
-// worker pool with specified worker number.
-func WithGCWorkerPoolInitializer(wpInit func(int) util.WorkerPool) Option {
- return func(c *cfg) {
- c.gcCfg.workerPoolInit = wpInit
- }
-}
-
-// WithGCRemoverSleepInterval returns option to specify sleep
-// interval between object remover executions.
-func WithGCRemoverSleepInterval(dur time.Duration) Option {
- return func(c *cfg) {
- c.gcCfg.removerInterval = dur
- }
-}
-
-// WithExpiredTombstonesCallback returns option to specify callback
-// of the expired tombstones handler.
-func WithExpiredTombstonesCallback(cb ExpiredTombstonesCallback) Option {
- return func(c *cfg) {
- c.expiredTombstonesCallback = cb
- }
-}
-
-// WithExpiredLocksCallback returns option to specify callback
-// of the expired LOCK objects handler.
-func WithExpiredLocksCallback(cb ExpiredObjectsCallback) Option {
- return func(c *cfg) {
- c.expiredLocksCallback = cb
- }
-}
-
-// WithRefillMetabase returns option to set flag to refill the Metabase on Shard's initialization step.
-func WithRefillMetabase(v bool) Option {
- return func(c *cfg) {
- c.refillMetabase = v
- }
-}
-
-// WithRefillMetabaseWorkersCount returns option to set count of workers to refill the Metabase on Shard's initialization step.
-func WithRefillMetabaseWorkersCount(v int) Option {
- return func(c *cfg) {
- c.refillMetabaseWorkersCount = v
- }
-}
-
-// WithMode returns option to set shard's mode. Mode must be one of the predefined:
-// - mode.ReadWrite;
-// - mode.ReadOnly.
-func WithMode(v mode.Mode) Option {
- return func(c *cfg) {
- c.info.Mode = v
- }
-}
-
-// WithTombstoneSource returns option to set TombstoneSource.
-func WithTombstoneSource(v TombstoneSource) Option {
- return func(c *cfg) {
- c.tsSource = v
- }
-}
-
-// WithDeletedLockCallback returns option to specify callback
-// of the deleted LOCK objects handler.
-func WithDeletedLockCallback(v DeletedLockCallback) Option {
- return func(c *cfg) {
- c.deletedLockCallBack = v
- }
-}
-
-// WithMetricsWriter returns option to specify storage of the
-// shard's metrics.
-func WithMetricsWriter(v MetricsWriter) Option {
- return func(c *cfg) {
- c.metricsWriter = v
- }
-}
-
-// WithGCMetrics returns option to specify storage of the GC metrics.
-func WithGCMetrics(v GCMectrics) Option {
- return func(c *cfg) {
- c.gcCfg.metrics = v
- }
-}
-
-// WithReportErrorFunc returns option to specify callback for handling storage-related errors
-// in the background workers.
-func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
-	return func(c *cfg) {
-		c.reportErrorFunc = f
-	}
-}
-
-// WithExpiredCollectorBatchSize returns option to set batch size
-// of expired object collection operation.
-func WithExpiredCollectorBatchSize(size int) Option {
-	return func(c *cfg) {
-		c.gcCfg.expiredCollectorBatchSize = size
-	}
-}
-
-// WithExpiredCollectorWorkerCount returns option to set concurrent
-// workers count of expired object collection operation.
-func WithExpiredCollectorWorkerCount(count int) Option {
-	return func(c *cfg) {
-		c.gcCfg.expiredCollectorWorkerCount = count
-	}
-}
-
-// WithDisabledGC disables GC.
-// For testing purposes only.
-func WithDisabledGC() Option {
-	return func(c *cfg) {
-		c.gcCfg.testHookRemover = func(_ context.Context) gcRunResult { return gcRunResult{} }
-	}
-}
-
-// WithZeroSizeCallback returns option to set zero-size containers callback.
-func WithZeroSizeCallback(cb EmptyContainersCallback) Option {
-	return func(c *cfg) {
-		c.zeroSizeContainersCallback = cb
-	}
-}
-
-// WithZeroCountCallback returns option to set zero-count containers callback.
-func WithZeroCountCallback(cb EmptyContainersCallback) Option {
-	return func(c *cfg) {
-		c.zeroCountContainersCallback = cb
-	}
-}
-
-// WithContainerInfoProvider returns option to set container info provider.
-func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
-	return func(c *cfg) {
-		c.containerInfo = containerInfo
-	}
-}
-
-// WithLimiter returns option to set the QoS operations limiter.
-func WithLimiter(l qos.Limiter) Option {
-	return func(c *cfg) {
-		c.configOpsLimiter = l
-	}
-}
-
-func (s *Shard) fillInfo() {
-	s.info.MetaBaseInfo = s.metaBase.DumpInfo()
-	s.info.BlobStorInfo = s.blobStor.DumpInfo()
-	s.info.Mode = s.GetMode()
-
-	if s.useWriteCache {
-		s.info.WriteCacheInfo = s.writeCache.DumpInfo()
-	}
-	if s.pilorama != nil {
-		s.info.PiloramaInfo = s.pilorama.DumpInfo()
-	}
-}
-
-const (
-	// physical is a physically stored object
-	// counter type.
-	physical = "phy"
-
-	// logical is a logically stored object
-	// counter type (excludes objects that are
-	// stored but unavailable).
-	logical = "logic"
-	// user is an available small or big regular object.
-	user = "user"
-)
-
-func (s *Shard) updateMetrics(ctx context.Context) {
-	if s.GetMode().NoMetabase() {
-		return
-	}
-
-	cc, err := s.metaBase.ObjectCounters()
-	if err != nil {
-		s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
-			zap.Error(err),
-		)
-
-		return
-	}
-
-	s.setObjectCounterBy(physical, cc.Phy)
-	s.setObjectCounterBy(logical, cc.Logic)
-	s.setObjectCounterBy(user, cc.User)
-
-	cnrList, err := s.metaBase.Containers(ctx)
-	if err != nil {
-		s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
-		return
-	}
-
-	var totalPayload uint64
-
-	for i := range cnrList {
-		size, err := s.metaBase.ContainerSize(cnrList[i])
-		if err != nil {
-			s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
-				zap.String("cid", cnrList[i].EncodeToString()),
-				zap.Error(err))
-			continue
-		}
-		s.addToContainerSize(cnrList[i].EncodeToString(), int64(size))
-		totalPayload += size
-	}
-
-	s.addToPayloadSize(int64(totalPayload))
-
-	contCount, err := s.metaBase.ContainerCounters(ctx)
-	if err != nil {
-		s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
-		return
-	}
-	for contID, count := range contCount.Counts {
-		s.setContainerObjectsCount(contID.EncodeToString(), physical, count.Phy)
-		s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
-		s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
-	}
-	s.metricsWriter.SetMode(s.info.Mode)
-}
-
-// incObjectCounter increments both the physical and the logical object
-// counters.
-func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
-	s.metricsWriter.IncObjectCounter(physical)
-	s.metricsWriter.IncObjectCounter(logical)
-	s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
-	s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
-	if isUser {
-		s.metricsWriter.IncObjectCounter(user)
-		s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
-	}
-}
-
-func (s *Shard) decObjectCounterBy(typ string, v uint64) {
-	if v > 0 {
-		s.metricsWriter.AddToObjectCounter(typ, -int(v))
-	}
-}
-
-func (s *Shard) setObjectCounterBy(typ string, v uint64) {
-	if v > 0 {
-		s.metricsWriter.SetObjectCounter(typ, v)
-	}
-}
-
-func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
-	for cnrID, count := range byCnr {
-		if count.Phy > 0 {
-			s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
-		}
-		if count.Logic > 0 {
-			s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
-		}
-		if count.User > 0 {
-			s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
-		}
-	}
-}
-
-func (s *Shard) addToContainerSize(cnr string, size int64) {
-	if size != 0 {
-		s.metricsWriter.AddToContainerSize(cnr, size)
-	}
-}
-
-func (s *Shard) addToPayloadSize(size int64) {
-	if size != 0 {
-		s.metricsWriter.AddToPayloadSize(size)
-	}
-}
-
-func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
-	if v > 0 {
-		s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
-	}
-}
-
-func (s *Shard) SetEvacuationInProgress(val bool) {
-	s.m.Lock()
-	defer s.m.Unlock()
-	s.info.EvacuationInProgress = val
-	s.metricsWriter.SetEvacuationInProgress(val)
-}
-
-var _ qos.Limiter = &atomicOpsLimiter{}
-
-func newAtomicOpsLimiter(l qos.Limiter) *atomicOpsLimiter {
-	result := &atomicOpsLimiter{}
-	result.ptr.Store(&qosLimiterHolder{Limiter: l})
-	return result
-}
-
-type atomicOpsLimiter struct {
-	ptr atomic.Pointer[qosLimiterHolder]
-}
-
-func (a
*atomicOpsLimiter) Close() { - a.ptr.Load().Close() -} - -func (a *atomicOpsLimiter) ReadRequest(ctx context.Context) (qos.ReleaseFunc, error) { - return a.ptr.Load().ReadRequest(ctx) -} - -func (a *atomicOpsLimiter) SetMetrics(m qos.Metrics) { - a.ptr.Load().SetMetrics(m) -} - -func (a *atomicOpsLimiter) SetParentID(id string) { - a.ptr.Load().SetParentID(id) -} - -func (a *atomicOpsLimiter) WriteRequest(ctx context.Context) (qos.ReleaseFunc, error) { - return a.ptr.Load().WriteRequest(ctx) -} - -type qosLimiterHolder struct { - qos.Limiter -} diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go deleted file mode 100644 index 84be71c4d..000000000 --- a/pkg/local_object_storage/shard/shard_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package shard - -import ( - "context" - "path/filepath" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/panjf2000/ants/v2" - "github.com/stretchr/testify/require" -) - -type epochState struct { - Value uint64 -} - -func (s epochState) CurrentEpoch() uint64 { - return s.Value -} - -type shardOptions struct { - rootPath string - wcOpts []writecache.Option - bsOpts []blobstor.Option - metaOptions []meta.Option - - additionalShardOptions []Option -} - -func newShard(t testing.TB, enableWriteCache bool) *Shard { - return newCustomShard(t, enableWriteCache, shardOptions{}) -} - -func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { - if o.rootPath == "" { - o.rootPath = t.TempDir() - } - - var sh *Shard - if enableWriteCache { - o.wcOpts = append( - []writecache.Option{writecache.WithPath(filepath.Join(o.rootPath, "wcache"))}, - o.wcOpts...) 
-	}
-
-	if o.bsOpts == nil {
-		o.bsOpts = []blobstor.Option{
-			blobstor.WithLogger(test.NewLogger(t)),
-			blobstor.WithStorages([]blobstor.SubStorage{
-				{
-					Storage: blobovniczatree.NewBlobovniczaTree(
-						context.Background(),
-						blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
-						blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
-						blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
-						blobovniczatree.WithBlobovniczaShallowDepth(1),
-						blobovniczatree.WithBlobovniczaShallowWidth(1)),
-					Policy: func(_ *objectSDK.Object, data []byte) bool {
-						return len(data) <= 1<<20
-					},
-				},
-				{
-					Storage: fstree.New(
-						fstree.WithPath(filepath.Join(o.rootPath, "blob"))),
-				},
-			}),
-		}
-	}
-
-	opts := []Option{
-		WithID(NewIDFromBytes([]byte{})),
-		WithLogger(test.NewLogger(t)),
-		WithBlobStorOptions(o.bsOpts...),
-		WithMetaBaseOptions(
-			append([]meta.Option{
-				meta.WithPath(filepath.Join(o.rootPath, "meta")), meta.WithEpochState(epochState{}),
-			},
-				o.metaOptions...)...,
-		),
-		WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
-		WithWriteCache(enableWriteCache),
-		WithWriteCacheOptions(o.wcOpts),
-		WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
-			sh.HandleDeletedLocks(ctx, addresses)
-		}),
-		WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
-			sh.HandleExpiredLocks(ctx, epoch, a)
-		}),
-		WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			require.NoError(t, err)
-			return pool
-		}),
-		WithGCRemoverSleepInterval(100 * time.Millisecond),
-	}
-	opts = append(opts, o.additionalShardOptions...)
-
-	sh = New(opts...)
-
-	require.NoError(t, sh.Open(context.Background()))
-	require.NoError(t, sh.Init(context.Background()))
-
-	return sh
-}
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
deleted file mode 100644
index b1232707f..000000000
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package shard
-
-import (
-	"context"
-	"crypto/rand"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sync/errgroup"
-)
-
-func TestWriteCacheObjectLoss(t *testing.T) {
-	t.Parallel()
-
-	const (
-		smallSize = 1024
-		objCount  = 100
-	)
-
-	objects := make([]*objectSDK.Object, objCount)
-	for i := range objects {
-		size := smallSize
-		if i%2 == 0 {
-			size = smallSize / 2
-		}
-		data := make([]byte, size)
-		rand.Read(data)
-
-		objects[i] = testutil.GenerateObjectWithCIDWithPayload(cidtest.ID(), data)
-	}
-
-	dir := t.TempDir()
-	wcOpts := []writecache.Option{
-		writecache.WithMaxObjectSize(smallSize * 2),
-	}
-
-	sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
-
-	var errG errgroup.Group
-	for i := range objects {
-		obj := objects[i]
-		errG.Go(func() error {
-			var putPrm PutPrm
-			putPrm.SetObject(obj)
-			_, err := sh.Put(context.Background(), putPrm)
-			return err
-		})
-	}
-	require.NoError(t, errG.Wait())
-	require.NoError(t, sh.Close(context.Background()))
-
-	sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
-
defer func() { require.NoError(t, sh.Close(context.Background())) }() - - var getPrm GetPrm - - for i := range objects { - getPrm.SetAddress(object.AddressOf(objects[i])) - - _, err := sh.Get(context.Background(), getPrm) - require.NoError(t, err, i) - } -} diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go deleted file mode 100644 index db361a8bd..000000000 --- a/pkg/local_object_storage/shard/tree.go +++ /dev/null @@ -1,541 +0,0 @@ -package shard - -import ( - "context" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var _ pilorama.Forest = (*Shard)(nil) - -// ErrPiloramaDisabled is returned when pilorama was disabled in the configuration. -var ErrPiloramaDisabled = logicerr.New("pilorama is disabled") - -// TreeMove implements the pilorama.Forest interface. -func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeMove", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return nil, ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeMove(ctx, d, treeID, m) -} - -// TreeAddByPath implements the pilorama.Forest interface. -func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeAddByPath", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", d.CID.EncodeToString()), - attribute.Int("position", d.Position), - attribute.Int("size", d.Size), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Int("meta_count", len(meta)), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return nil, ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) -} - -// TreeApply implements the pilorama.Forest interface. 
-func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApply", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.Bool("background", backgroundSync), - ), - ) - defer span.End() - - if s.pilorama == nil { - return ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) -} - -// TreeApplyBatch implements the pilorama.Forest interface. -func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) -} - -// TreeGetByPath implements the pilorama.Forest interface. -func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("attr", attr), - attribute.Int("path_count", len(path)), - attribute.Bool("latest", latest), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) -} - -// TreeGetMeta implements the pilorama.Forest interface. 
-func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetMeta", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - if s.pilorama == nil { - return pilorama.Meta{}, 0, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return pilorama.Meta{}, 0, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return pilorama.Meta{}, 0, err - } - defer release() - return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) -} - -// TreeGetChildren implements the pilorama.Forest interface. -func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]pilorama.NodeInfo, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetChildren", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("node_id", strconv.FormatUint(nodeID, 10)), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) -} - -// TreeSortedByFilename implements the pilorama.Forest interface. -func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, last, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, last, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, last, err - } - defer release() - return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) -} - -// TreeGetOpLog implements the pilorama.Forest interface. 
-func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetOpLog", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - if s.pilorama == nil { - return pilorama.Move{}, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return pilorama.Move{}, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return pilorama.Move{}, err - } - defer release() - return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) -} - -// TreeDrop implements the pilorama.Forest interface. -func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeDrop", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.pilorama.TreeDrop(ctx, cid, treeID) -} - -// TreeList implements the pilorama.Forest interface. -func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeList", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeList(ctx, cid) -} - -func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeHeight", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return 0, ErrDegradedMode - } - - if s.pilorama == nil { - return 0, ErrPiloramaDisabled - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - return s.pilorama.TreeHeight(ctx, cid, treeID) -} - -// TreeExists implements the pilorama.Forest interface. 
-func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeExists", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return false, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return false, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } - defer release() - return s.pilorama.TreeExists(ctx, cid, treeID) -} - -// TreeUpdateLastSyncHeight implements the pilorama.Forest interface. -func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeUpdateLastSyncHeight", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - attribute.String("height", strconv.FormatUint(height, 10)), - ), - ) - defer span.End() - - if s.pilorama == nil { - return ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - return ErrDegradedMode - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) -} - -// TreeLastSyncHeight implements the pilorama.Forest interface. -func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeLastSyncHeight", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cid.EncodeToString()), - attribute.String("tree_id", treeID), - ), - ) - defer span.End() - - if s.pilorama == nil { - return 0, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return 0, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) -} - -func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm) (*pilorama.TreeListTreesResult, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeListTrees", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - ), - ) - defer span.End() - - if s.pilorama == nil { - return nil, ErrPiloramaDisabled - } - - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.pilorama.TreeListTrees(ctx, prm) -} - -func (s *Shard) PiloramaEnabled() bool { - return s.pilorama != nil -} - -func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *pilorama.Move) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyStream", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.String("container_id", cnr.EncodeToString()), - attribute.String("tree_id", treeID)), - ) - defer span.End() - - if s.pilorama == nil { - return ErrPiloramaDisabled - } - 
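-	// The guard sequence below repeats across every Tree* method in this
-	// file: take modeMtx for reading, reject writes in read-only mode and
-	// any metabase-backed work in degraded mode, then charge the request
-	// against the shard's ops limiter before touching the pilorama forest.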
-	s.m.RLock()
-	defer s.m.RUnlock()
-
-	if s.info.Mode.ReadOnly() {
-		return ErrReadOnlyMode
-	}
-	if s.info.Mode.NoMetabase() {
-		return ErrDegradedMode
-	}
-	release, err := s.opsLimiter.WriteRequest(ctx)
-	if err != nil {
-		return err
-	}
-	defer release()
-	return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
-}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
deleted file mode 100644
index 9edb89df8..000000000
--- a/pkg/local_object_storage/shard/writecache.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package shard
-
-import (
-	"context"
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-	"go.uber.org/zap"
-)
-
-var (
-	dummyCancel          = &writecacheSealCanceler{cancel: func() {}}
-	notInitializedCancel = &writecacheSealCanceler{cancel: func() {}}
-	errWriteCacheSealing = errors.New("writecache is already sealing or shard is not initialized")
-)
-
-type writecacheSealCanceler struct {
-	cancel context.CancelFunc
-}
-
-// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
-type FlushWriteCachePrm struct {
-	ignoreErrors bool
-	seal         bool
-}
-
-// SetIgnoreErrors sets the flag to ignore read errors during flush.
-func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
-	p.ignoreErrors = ignore
-}
-
-// SetSeal sets the flag to leave the writecache in read-only mode after flush.
-func (p *FlushWriteCachePrm) SetSeal(v bool) {
-	p.seal = v
-}
-
-// errWriteCacheDisabled is returned when an operation on write-cache is performed,
-// but write-cache is disabled.
-var errWriteCacheDisabled = errors.New("write-cache is disabled")
-
-// FlushWriteCache flushes all data from the write-cache.
-func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "Shard.FlushWriteCache",
-		trace.WithAttributes(
-			attribute.String("shard_id", s.ID().String()),
-			attribute.Bool("ignore_errors", p.ignoreErrors),
-			attribute.Bool("seal", p.seal),
-		))
-	defer span.End()
-
-	if !s.hasWriteCache() {
-		return errWriteCacheDisabled
-	}
-
-	s.m.RLock()
-	defer s.m.RUnlock()
-
-	// Flushing implies writing to both the blobstor and the metabase.
-	if s.info.Mode.ReadOnly() {
-		return ErrReadOnlyMode
-	}
-	if s.info.Mode.NoMetabase() {
-		return ErrDegradedMode
-	}
-
-	release, err := s.opsLimiter.WriteRequest(ctx)
-	if err != nil {
-		return err
-	}
-	defer release()
-
-	return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
-}
-
-type SealWriteCachePrm struct {
-	IgnoreErrors bool
-	Async        bool
-	RestoreMode  bool
-	Shrink       bool
-}
-
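A hedged usage sketch for the struct above (sealForMaintenance is hypothetical; sh and ctx come from the caller):

	package example

	import (
		"context"

		"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	)

	func sealForMaintenance(ctx context.Context, sh *shard.Shard) error {
		return sh.SealWriteCache(ctx, shard.SealWriteCachePrm{
			IgnoreErrors: false,
			Async:        true, // return as soon as the background seal has started
			Shrink:       true,
		})
	}

Note that a second concurrent call fails: the implementation below allows only one seal at a time.

-// SealWriteCache flushes all data from the write-cache and moves it to degraded read-only mode.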
-func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.SealWriteCache", - trace.WithAttributes( - attribute.String("shard_id", s.ID().String()), - attribute.Bool("ignore_errors", p.IgnoreErrors), - attribute.Bool("restore_mode", p.RestoreMode), - )) - defer span.End() - - if !s.hasWriteCache() { - return errWriteCacheDisabled - } - - if p.Async { - ctx = context.WithoutCancel(ctx) - } - ctx, cancel := context.WithCancel(ctx) - canceler := &writecacheSealCanceler{cancel: cancel} - if !s.writecacheSealCancel.CompareAndSwap(dummyCancel, canceler) { - return errWriteCacheSealing - } - s.m.RLock() - cleanup := func() { - s.m.RUnlock() - s.writecacheSealCancel.Store(dummyCancel) - } - - if s.info.Mode.ReadOnly() { - cleanup() - return ErrReadOnlyMode - } - if s.info.Mode.NoMetabase() { - cleanup() - return ErrDegradedMode - } - - if !p.Async { - defer cleanup() - } - prm := writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink} - if p.Async { - started := make(chan struct{}) - go func() { - close(started) - defer cleanup() - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) - return - } - defer release() - - s.log.Info(ctx, logs.StartedWritecacheSealAsync) - if err := s.writeCache.Seal(ctx, prm); err != nil { - s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) - return - } - s.log.Info(ctx, logs.WritecacheSealCompletedAsync) - }() - select { - case <-ctx.Done(): - return ctx.Err() - case <-started: - return nil - } - } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - - return s.writeCache.Seal(ctx, prm) -} diff --git a/pkg/local_object_storage/util/ecinfo.go b/pkg/local_object_storage/util/ecinfo.go deleted file mode 100644 index a92fbceea..000000000 --- a/pkg/local_object_storage/util/ecinfo.go +++ /dev/null @@ -1,25 +0,0 @@ -package util - -import ( - "bytes" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// MergeECInfo ignores conflicts and rewrites `to` with non empty values -// from `from`. 
-func MergeECInfo(from, to *objectSDK.ECInfo) *objectSDK.ECInfo {
-	for _, fchunk := range from.Chunks {
-		add := true
-		for _, tchunk := range to.Chunks {
-			if bytes.Equal(tchunk.ID.GetValue(), fchunk.ID.GetValue()) {
-				add = false
-				break
-			}
-		}
-		if add {
-			to.AddChunk(*objectSDK.NewECChunkFromV2(&fchunk))
-		}
-	}
-	return to
-}
diff --git a/pkg/local_object_storage/util/ecinfo_test.go b/pkg/local_object_storage/util/ecinfo_test.go
deleted file mode 100644
index 081006088..000000000
--- a/pkg/local_object_storage/util/ecinfo_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package util
-
-import (
-	"crypto/rand"
-	"testing"
-
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/stretchr/testify/require"
-)
-
-func TestMergeECInfo(t *testing.T) {
-	id := generateV2ID()
-	target := objectSDK.NewECInfo()
-	var chunk objectSDK.ECChunk
-	chunk.Total = 2
-	chunk.Index = 0
-	chunk.SetID(id)
-	target.AddChunk(chunk)
-
-	t.Run("merge empty", func(t *testing.T) {
-		to := objectSDK.NewECInfo()
-
-		result := MergeECInfo(target, to)
-		require.Equal(t, result, target)
-	})
-
-	t.Run("merge existed", func(t *testing.T) {
-		to := objectSDK.NewECInfo()
-		to.AddChunk(chunk)
-
-		result := MergeECInfo(target, to)
-		require.Equal(t, result, target)
-	})
-	t.Run("merge extend", func(t *testing.T) {
-		to := objectSDK.NewECInfo()
-		var chunk objectSDK.ECChunk
-		chunk.Total = 2
-		chunk.Index = 1
-		chunk.SetID(generateV2ID())
-		to.AddChunk(chunk)
-
-		result := MergeECInfo(target, to)
-		require.Equal(t, len(result.Chunks), 2)
-	})
-}
-
-func generateV2ID() oid.ID {
-	var buf [32]byte
-	_, _ = rand.Read(buf[:])
-
-	var id oid.ID
-	_ = id.Decode(buf[:])
-
-	return id
-}
diff --git a/pkg/local_object_storage/util/logicerr/error.go b/pkg/local_object_storage/util/logicerr/error.go
deleted file mode 100644
index 6b78cb073..000000000
--- a/pkg/local_object_storage/util/logicerr/error.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package logicerr
-
-import "errors"
-
-// Logical is a wrapper for logical errors.
-type Logical struct {
-	err error
-}
-
-// New returns a simple error with the provided error message.
-func New(msg string) Logical {
-	return Wrap(errors.New(msg))
-}
-
-// Error implements the error interface.
-func (e Logical) Error() string {
-	return e.err.Error()
-}
-
-// Wrap wraps an arbitrary error into a logical one.
-func Wrap(err error) Logical {
-	return Logical{err: err}
-}
-
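A sketch of the intended use (isLogical is hypothetical): wrapping lets error-reporting layers separate client-caused failures, such as ErrPiloramaDisabled in tree.go above, from storage faults.

	package example

	import (
		"errors"

		"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	)

	// isLogical reports whether err was produced by logicerr.New or logicerr.Wrap.
	func isLogical(err error) bool {
		var l logicerr.Logical
		return errors.As(err, &l)
	}

-// Unwrap returns the underlying error.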
-func (e Logical) Unwrap() error { - return e.err -} diff --git a/pkg/local_object_storage/util/logicerr/error_test.go b/pkg/local_object_storage/util/logicerr/error_test.go deleted file mode 100644 index c2d06cac4..000000000 --- a/pkg/local_object_storage/util/logicerr/error_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package logicerr - -import ( - "errors" - "fmt" - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestError(t *testing.T) { - t.Run("errors.Is", func(t *testing.T) { - e1 := errors.New("some error") - ee := Wrap(e1) - require.ErrorIs(t, ee, e1) - - e2 := fmt.Errorf("wrap: %w", e1) - ee = Wrap(e2) - require.ErrorIs(t, ee, e1) - require.ErrorIs(t, ee, e2) - - require.Equal(t, errors.Unwrap(ee), e2) - }) - - t.Run("errors.As", func(t *testing.T) { - e1 := testError{42} - ee := Wrap(e1) - - { - var actual testError - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1.data, actual.data) - } - { - var actual Logical - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1, actual.err) - } - - e2 := fmt.Errorf("wrap: %w", e1) - ee = Wrap(e2) - - { - var actual testError - require.ErrorAs(t, ee, &actual) - require.Equal(t, e1.data, actual.data) - } - }) -} - -type testError struct { - data uint64 -} - -func (e testError) Error() string { - return strconv.FormatUint(e.data, 10) -} diff --git a/pkg/local_object_storage/util/splitinfo.go b/pkg/local_object_storage/util/splitinfo.go deleted file mode 100644 index 6ae1c3e46..000000000 --- a/pkg/local_object_storage/util/splitinfo.go +++ /dev/null @@ -1,21 +0,0 @@ -package util - -import ( - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// MergeSplitInfo ignores conflicts and rewrites `to` with non empty values -// from `from`. -func MergeSplitInfo(from, to *objectSDK.SplitInfo) *objectSDK.SplitInfo { - to.SetSplitID(from.SplitID()) // overwrite SplitID and ignore conflicts - - if lp, ok := from.LastPart(); ok { - to.SetLastPart(lp) - } - - if link, ok := from.Link(); ok { - to.SetLink(link) - } - - return to -} diff --git a/pkg/local_object_storage/util/splitinfo_test.go b/pkg/local_object_storage/util/splitinfo_test.go deleted file mode 100644 index 0b7be5af3..000000000 --- a/pkg/local_object_storage/util/splitinfo_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package util_test - -import ( - "crypto/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/google/uuid" - "github.com/stretchr/testify/require" -) - -func TestMergeSplitInfo(t *testing.T) { - uid, err := uuid.NewUUID() - require.NoError(t, err) - - splitID := objectSDK.NewSplitID() - splitID.SetUUID(uid) - - var rawLinkID, rawLastID [32]byte - var linkID oid.ID - var lastID oid.ID - - _, err = rand.Read(rawLinkID[:]) - require.NoError(t, err) - linkID.SetSHA256(rawLinkID) - - _, err = rand.Read(rawLastID[:]) - require.NoError(t, err) - lastID.SetSHA256(rawLastID) - - target := objectSDK.NewSplitInfo() // target is SplitInfo struct with all fields set - target.SetSplitID(splitID) - target.SetLastPart(lastID) - target.SetLink(linkID) - - t.Run("merge empty", func(t *testing.T) { - to := objectSDK.NewSplitInfo() - - result := util.MergeSplitInfo(target, to) - require.Equal(t, result, target) - }) - - t.Run("merge link", func(t *testing.T) { - from := objectSDK.NewSplitInfo() - from.SetSplitID(splitID) - from.SetLastPart(lastID) - - to := 
objectSDK.NewSplitInfo() - to.SetLink(linkID) - - result := util.MergeSplitInfo(from, to) - require.Equal(t, result, target) - }) - t.Run("merge last", func(t *testing.T) { - from := objectSDK.NewSplitInfo() - from.SetSplitID(splitID) - from.SetLink(linkID) - - to := objectSDK.NewSplitInfo() - to.SetLastPart(lastID) - - result := util.MergeSplitInfo(from, to) - require.Equal(t, result, target) - }) -} diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go deleted file mode 100644 index fd85b4501..000000000 --- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package benchmark - -import ( - "context" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "github.com/stretchr/testify/require" -) - -func BenchmarkWritecacheSeq(b *testing.B) { - const payloadSize = 8 << 10 - b.Run("bbolt_seq", func(b *testing.B) { - benchmarkPutSeq(b, newCache(b), payloadSize) - }) -} - -func BenchmarkWritecachePar(b *testing.B) { - const payloadSize = 8 << 10 - b.Run("bbolt_par", func(b *testing.B) { - benchmarkPutPar(b, newCache(b), payloadSize) - }) -} - -func BenchmarkWriteAfterDelete(b *testing.B) { - const payloadSize = 32 << 10 - const parallel = 25 - - cache := newCache(b) - benchmarkPutPrepare(b, cache) - b.Run(fmt.Sprintf("%dB_before", payloadSize), func(b *testing.B) { - b.SetParallelism(parallel) - benchmarkRunPar(b, cache, payloadSize) - }) - require.NoError(b, cache.Flush(context.Background(), false, false)) - b.Run(fmt.Sprintf("%dB_after", payloadSize), func(b *testing.B) { - b.SetParallelism(parallel) - benchmarkRunPar(b, cache, payloadSize) - }) - require.NoError(b, cache.Close(context.Background())) -} - -func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { - benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close(context.Background())) }() - - ctx := context.Background() - objGen := testutil.RandObjGenerator{ObjSize: size} - - b.ResetTimer() - for range b.N { - obj := objGen.Next() - rawData, err := obj.Marshal() - require.NoError(b, err, "marshaling object") - prm := common.PutPrm{ - Address: testutil.AddressFromObject(b, obj), - Object: obj, - RawData: rawData, - } - if _, err := cache.Put(ctx, prm); err != nil { - b.Fatalf("putting: %v", err) - } - } -} - -func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) { - benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close(context.Background())) }() - - benchmarkRunPar(b, cache, size) -} - -func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) { - ctx := context.Background() - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - objGen := testutil.RandObjGenerator{ObjSize: size} - for pb.Next() { - obj := objGen.Next() - rawData, err := obj.Marshal() - require.NoError(b, err, "marshaling object") - prm := common.PutPrm{ - Address: testutil.AddressFromObject(b, obj), - Object: obj, - RawData: rawData, - } - if _, err := 
cache.Put(ctx, prm); err != nil {
-				b.Fatalf("putting: %v", err)
-			}
-		}
-	})
-}
-
-func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
-	require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
-	require.NoError(b, cache.Init(context.Background()), "initializing")
-}
-
-type testMetabase struct{}
-
-func (testMetabase) UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error) {
-	return meta.UpdateStorageIDRes{}, nil
-}
-
-func newCache(b *testing.B) writecache.Cache {
-	bs := teststore.New(
-		teststore.WithPut(func(pp common.PutPrm) (common.PutRes, error) { return common.PutRes{}, nil }),
-	)
-	return writecache.New(
-		writecache.WithPath(b.TempDir()),
-		writecache.WithBlobstor(bs),
-		writecache.WithMetabase(testMetabase{}),
-		writecache.WithMaxCacheSize(256<<30),
-	)
-}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
deleted file mode 100644
index ee709ea73..000000000
--- a/pkg/local_object_storage/writecache/cache.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package writecache
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"sync/atomic"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"go.uber.org/zap"
-)
-
-type cache struct {
-	options
-
-	mode    mode.Mode
-	modeMtx sync.RWMutex
-
-	// flushCh is a channel with objects to flush.
-	flushCh chan objectInfo
-	// cancel is the cancel function, protected by modeMtx in Close.
-	cancel atomic.Value
-	// wg is a wait group for flush workers.
-	wg sync.WaitGroup
-	// fsTree contains big files stored directly on file-system.
-	fsTree *fstree.FSTree
-	// counter contains atomic counters for the number of objects stored in cache.
-	counter *fstree.SimpleCounter
-}
-
-// wcStorageType is used for write-cache operations logging.
-const wcStorageType = "write-cache"
-
-type objectInfo struct {
-	addr oid.Address
-	size uint64
-}
-
-const (
-	defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
-	defaultMaxCacheSize  = 1 << 30          // 1 GiB
-)
-
-var dummyCanceler context.CancelFunc = func() {}
-
-// New creates a new writecache instance.
-func New(opts ...Option) Cache {
-	c := &cache{
-		flushCh: make(chan objectInfo),
-		mode:    mode.Disabled,
-		counter: fstree.NewSimpleCounter(),
-
-		options: options{
-			log:            logger.NewLoggerWrapper(zap.NewNop()),
-			maxObjectSize:  defaultMaxObjectSize,
-			workersCount:   defaultFlushWorkersCount,
-			maxCacheSize:   defaultMaxCacheSize,
-			metrics:        DefaultMetrics(),
-			flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
-			qosLimiter:     qos.NewNoopLimiter(),
-		},
-	}
-
-	for i := range opts {
-		opts[i](&c.options)
-	}
-
-	return c
-}
-
-// SetLogger sets the logger. It is called after the shard ID has been generated, so that the ID can appear in logs.
-func (c *cache) SetLogger(l *logger.Logger) {
-	c.log = l
-}
-
-func (c *cache) DumpInfo() Info {
-	return Info{
-		Path: c.path,
-	}
-}
-
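For orientation, the expected lifecycle mirrors benchmarkPutPrepare above; a sketch (runCache is hypothetical, and opts must carry at least WithPath, WithBlobstor, and WithMetabase, as in newCache):

	package example

	import (
		"context"

		"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
		"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
	)

	// runCache opens and initializes a write-cache, then closes it on return.
	func runCache(ctx context.Context, opts ...writecache.Option) error {
		wc := writecache.New(opts...)
		if err := wc.Open(ctx, mode.ReadWrite); err != nil {
			return err
		}
		if err := wc.Init(ctx); err != nil {
			return err
		}
		defer func() { _ = wc.Close(ctx) }()

		// Put/Get/Flush traffic would go here.
		return nil
	}

-// Open opens and initializes the database. Reads object counters from the ObjectCounters instance.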
-func (c *cache) Open(_ context.Context, mod mode.Mode) error { - c.modeMtx.Lock() - defer c.modeMtx.Unlock() - c.mode = mod - if mod.NoMetabase() { - return nil - } - err := c.openStore(mode.ConvertToComponentModeDegraded(mod)) - if err != nil { - return metaerr.Wrap(err) - } - c.initCounters() - return nil -} - -// Init runs necessary services. -func (c *cache) Init(ctx context.Context) error { - c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode)) - if err := c.flushAndDropBBoltDB(ctx); err != nil { - return fmt.Errorf("flush previous version write-cache database: %w", err) - } - ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache - c.cancel.Store(cancel) - c.runFlushLoop(ctx) - return nil -} - -// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op. -func (c *cache) Close(ctx context.Context) error { - if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil { - cancelValue.(context.CancelFunc)() - } - // We cannot lock mutex for the whole operation duration - // because it is taken by some background workers, so `wg.Wait()` is done without modeMtx. - c.modeMtx.Lock() - c.mode = mode.DegradedReadOnly // prevent new operations from being processed - c.modeMtx.Unlock() - - c.wg.Wait() - - c.modeMtx.Lock() - defer c.modeMtx.Unlock() - - var err error - if c.fsTree != nil { - err = c.fsTree.Close(ctx) - if err != nil { - c.fsTree = nil - } - } - c.metrics.Close() - return nil -} - -func (c *cache) GetMetrics() Metrics { - return c.metrics -} diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go deleted file mode 100644 index 94a0a40db..000000000 --- a/pkg/local_object_storage/writecache/delete.go +++ /dev/null @@ -1,59 +0,0 @@ -package writecache - -import ( - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Delete removes object from write-cache. -// -// Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache. -// Returns ErrNotInitialized if write-cache has not been initialized yet. -// Returns ErrDegraded if write-cache is in DEGRADED mode. 
-func (c *cache) Delete(ctx context.Context, addr oid.Address) error { - ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete", - trace.WithAttributes( - attribute.String("address", addr.EncodeToString()), - )) - defer span.End() - - deleted := false - storageType := StorageTypeUndefined - startedAt := time.Now() - defer func() { - c.metrics.Delete(time.Since(startedAt), deleted, storageType) - }() - - if !c.modeMtx.TryRLock() { - return ErrNotInitialized - } - defer c.modeMtx.RUnlock() - if c.readOnly() { - return ErrReadOnly - } - if c.noMetabase() { - return ErrDegraded - } - - storageType = StorageTypeFSTree - _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr}) - if err == nil { - storagelog.Write(ctx, c.log, - storagelog.AddressField(addr.EncodeToString()), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("fstree DELETE"), - ) - deleted = true - // counter changed by fstree - c.estimateCacheSize() - } - return metaerr.Wrap(err) -} diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go deleted file mode 100644 index 893d27ba2..000000000 --- a/pkg/local_object_storage/writecache/flush.go +++ /dev/null @@ -1,254 +0,0 @@ -package writecache - -import ( - "context" - "errors" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -const ( - // defaultFlushWorkersCount is number of workers for putting objects in main storage. - defaultFlushWorkersCount = 20 - // defaultFlushInterval is default time interval between successive flushes. - defaultFlushInterval = 10 * time.Second -) - -var errIterationCompleted = errors.New("iteration completed") - -// runFlushLoop starts background workers which periodically flush objects to the blobstor. 
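-// The pipeline is a producer/consumer pair: pushToFlushQueue iterates the
-// FSTree on a ticker and sends candidates into flushCh, while workersCount
-// workerFlush goroutines drain the channel; a flushLimiter bounds the total
-// size of the objects in flight.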
-func (c *cache) runFlushLoop(ctx context.Context) { - if c.disableBackgroundFlush { - return - } - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String()) - fl := newFlushLimiter(c.flushSizeLimit) - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.pushToFlushQueue(ctx, fl) - }() - - for range c.workersCount { - c.wg.Add(1) - go c.workerFlush(ctx, fl) - } -} - -func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { - stopf := context.AfterFunc(ctx, func() { - fl.close() - }) - defer stopf() - - tick := time.NewTicker(defaultFlushInterval) - for { - select { - case <-tick.C: - c.modeMtx.RLock() - if c.readOnly() || c.noMetabase() { - c.modeMtx.RUnlock() - continue - } - - release, err := c.qosLimiter.ReadRequest(ctx) - if err != nil { - c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) - c.modeMtx.RUnlock() - continue - } - err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { - if err := fl.acquire(oi.DataSize); err != nil { - return err - } - select { - case c.flushCh <- objectInfo{ - addr: oi.Address, - size: oi.DataSize, - }: - return nil - case <-ctx.Done(): - fl.release(oi.DataSize) - return ctx.Err() - } - }) - release() - if err != nil { - c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) - } - - c.modeMtx.RUnlock() - - // counter changed by fstree - c.estimateCacheSize() - case <-ctx.Done(): - return - } - } -} - -func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { - defer c.wg.Done() - - var objInfo objectInfo - for { - select { - case objInfo = <-c.flushCh: - c.flushIfAnObjectExistsWorker(ctx, objInfo, fl) - case <-ctx.Done(): - return - } - } -} - -func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { - defer fl.release(objInfo.size) - - release, err := c.qosLimiter.WriteRequest(ctx) - if err != nil { - c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) - return - } - defer release() - res, err := c.fsTree.Get(ctx, common.GetPrm{ - Address: objInfo.addr, - }) - if err != nil { - if !client.IsErrObjectNotFound(err) { - c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err)) - } - return - } - - err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree) - if err != nil { - // Error is handled in flushObject. 
- return - } - - c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData))) -} - -func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) { - if c.reportError != nil { - c.reportError(ctx, msg, err) - } else { - c.log.Error(ctx, msg, - zap.String("address", addr), - zap.Error(err)) - } -} - -func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { - var prm common.IteratePrm - prm.IgnoreErrors = ignoreErrors - prm.Handler = func(e common.IterationElement) error { - sAddr := e.Address.EncodeToString() - - var obj objectSDK.Object - err := obj.Unmarshal(e.ObjectData) - if err != nil { - c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err)) - if ignoreErrors { - return nil - } - return err - } - - err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree) - if err != nil { - return err - } - - c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData))) - return nil - } - - _, err := c.fsTree.Iterate(ctx, prm) - return err -} - -// flushObject is used to write object directly to the main storage. -func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error { - var err error - - defer func() { - c.metrics.Flush(err == nil, st) - }() - - addr := objectCore.AddressOf(obj) - - var prm common.PutPrm - prm.Object = obj - prm.RawData = data - - res, err := c.blobstor.Put(ctx, prm) - if err != nil { - if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) && - !errors.Is(err, blobstor.ErrNoPlaceFound) { - c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor, - addr.EncodeToString(), err) - } - return err - } - - var updPrm meta.UpdateStorageIDPrm - updPrm.SetAddress(addr) - updPrm.SetStorageID(res.StorageID) - - _, err = c.metabase.UpdateStorageID(ctx, updPrm) - if err != nil { - c.reportFlushError(ctx, logs.FSTreeCantUpdateID, - addr.EncodeToString(), err) - } - return err -} - -// Flush flushes all objects from the write-cache to the main storage. 
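-// It takes modeMtx exclusively, so it cannot run concurrently with the
-// background flush loop; when seal is set, the cache is additionally switched
-// to read-only mode after a successful flush.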
-func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
-		trace.WithAttributes(
-			attribute.Bool("ignore_errors", ignoreErrors),
-			attribute.Bool("seal", seal),
-		))
-	defer span.End()
-
-	c.modeMtx.Lock() // take the exclusive lock so as not to conflict with the background flush
-	defer c.modeMtx.Unlock()
-	if c.noMetabase() {
-		return ErrDegraded
-	}
-
-	if err := c.flush(ctx, ignoreErrors); err != nil {
-		return err
-	}
-
-	if seal {
-		m := c.mode | mode.ReadOnly
-		if err := c.setMode(ctx, m, setModePrm{ignoreErrors: ignoreErrors}); err != nil {
-			return err
-		}
-		c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
-	}
-	return nil
-}
-
-func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
-	return c.flushFSTree(ctx, ignoreErrors)
-}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
deleted file mode 100644
index 7fc84657c..000000000
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package writecache
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"sync/atomic"
-	"testing"
-
-	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
-)
-
-func TestFlush(t *testing.T) {
-	testlogger := test.NewLogger(t)
-
-	createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
-		return New(
-			append([]Option{
-				WithLogger(testlogger),
-				WithPath(filepath.Join(t.TempDir(), "writecache")),
-				WithMetabase(mb),
-				WithBlobstor(bs),
-				WithDisableBackgroundFlush(),
-			}, opts...)...)
-	}
-
-	errCountOpt := func() (Option, *atomic.Uint32) {
-		cnt := &atomic.Uint32{}
-		return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
-			cnt.Add(1)
-			testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
-		}), cnt
-	}
-
-	failures := []TestFailureInjector[Option]{
-		{
-			Desc: "fs, read error",
-			InjectFn: func(t *testing.T, wc Cache) {
-				c := wc.(*cache)
-				obj := testutil.GenerateObject()
-				data, err := obj.Marshal()
-				require.NoError(t, err)
-
-				var prm common.PutPrm
-				prm.Address = objectCore.AddressOf(obj)
-				prm.RawData = data
-
-				_, err = c.fsTree.Put(context.Background(), prm)
-				require.NoError(t, err)
-
-				p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
+ prm.Address.Container().EncodeToString() - p = filepath.Join(c.fsTree.RootPath, p[:1], p[1:]) - - _, err = os.Stat(p) // sanity check - require.NoError(t, err) - require.NoError(t, os.Truncate(p, 0)) // corrupt the file contents, so that it can't be unmarshalled - }, - }, - { - Desc: "fs, invalid object", - InjectFn: func(t *testing.T, wc Cache) { - c := wc.(*cache) - var prm common.PutPrm - prm.Address = oidtest.Address() - prm.RawData = []byte{1, 2, 3} - _, err := c.fsTree.Put(context.Background(), prm) - require.NoError(t, err) - }, - }, - } - - runFlushTest(t, createCacheFn, errCountOpt, failures...) -} - -const ( - objCount = 4 - smallSize = 256 -) - -type CreateCacheFunc[Option any] func( - t *testing.T, - meta *meta.DB, - bs MainStorage, - opts ...Option, -) Cache - -type TestFailureInjector[Option any] struct { - Desc string - InjectFn func(*testing.T, Cache) -} - -type objectPair struct { - addr oid.Address - obj *objectSDK.Object -} - -func runFlushTest[Option any]( - t *testing.T, - createCacheFn CreateCacheFunc[Option], - errCountOption func() (Option, *atomic.Uint32), - failures ...TestFailureInjector[Option], -) { - t.Run("no errors", func(t *testing.T) { - wc, bs, mb := newCache(t, createCacheFn) - defer func() { require.NoError(t, wc.Close(context.Background())) }() - objects := putObjects(t, wc) - - require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) - - require.NoError(t, wc.Flush(context.Background(), false, false)) - - check(t, mb, bs, objects) - }) - - t.Run("flush on moving to degraded mode", func(t *testing.T) { - wc, bs, mb := newCache(t, createCacheFn) - defer func() { require.NoError(t, wc.Close(context.Background())) }() - objects := putObjects(t, wc) - - // Blobstor is read-only, so we expect en error from `flush` here. 
- require.Error(t, wc.SetMode(context.Background(), mode.Degraded)) - - require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) - require.NoError(t, wc.SetMode(context.Background(), mode.Degraded)) - - check(t, mb, bs, objects) - }) - - t.Run("ignore errors", func(t *testing.T) { - for _, f := range failures { - t.Run(f.Desc, func(t *testing.T) { - errCountOpt, errCount := errCountOption() - wc, bs, mb := newCache(t, createCacheFn, errCountOpt) - defer func() { require.NoError(t, wc.Close(context.Background())) }() - objects := putObjects(t, wc) - f.InjectFn(t, wc) - - require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) - - require.Equal(t, uint32(0), errCount.Load()) - require.Error(t, wc.Flush(context.Background(), false, false)) - require.Greater(t, errCount.Load(), uint32(0)) - require.NoError(t, wc.Flush(context.Background(), true, false)) - - check(t, mb, bs, objects) - }) - } - }) -} - -func newCache[Option any]( - t *testing.T, - createCacheFn CreateCacheFunc[Option], - opts ...Option, -) (Cache, *blobstor.BlobStor, *meta.DB) { - dir := t.TempDir() - mb := meta.New( - meta.WithPath(filepath.Join(dir, "meta")), - meta.WithEpochState(dummyEpoch{})) - require.NoError(t, mb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.Init(context.Background())) - - bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(dir, "blob")), - fstree.WithDepth(0), - fstree.WithDirNameLen(1)), - }, - })) - require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init(context.Background())) - - wc := createCacheFn(t, mb, bs, opts...) - require.NoError(t, wc.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, wc.Init(context.Background())) - - // First set mode for metabase and blobstor to prevent background flushes. 
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly)) - require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly)) - - return wc, bs, mb -} - -func putObject(t *testing.T, c Cache, size int) objectPair { - obj := testutil.GenerateObjectWithSize(size) - data, err := obj.Marshal() - require.NoError(t, err) - - var prm common.PutPrm - prm.Address = objectCore.AddressOf(obj) - prm.Object = obj - prm.RawData = data - - _, err = c.Put(context.Background(), prm) - require.NoError(t, err) - - return objectPair{prm.Address, prm.Object} -} - -func putObjects(t *testing.T, c Cache) []objectPair { - objects := make([]objectPair, objCount) - for i := range objects { - objects[i] = putObject(t, c, 1+(i%2)*smallSize) - } - return objects -} - -func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) { - for i := range objects { - var mPrm meta.StorageIDPrm - mPrm.SetAddress(objects[i].addr) - - mRes, err := mb.StorageID(context.Background(), mPrm) - require.NoError(t, err) - - var prm common.GetPrm - prm.Address = objects[i].addr - prm.StorageID = mRes.StorageID() - - res, err := bs.Get(context.Background(), prm) - require.NoError(t, err, objects[i].addr) - require.Equal(t, objects[i].obj, res.Object) - } -} - -type dummyEpoch struct{} - -func (dummyEpoch) CurrentEpoch() uint64 { - return 0 -} diff --git a/pkg/local_object_storage/writecache/generic_test.go b/pkg/local_object_storage/writecache/generic_test.go deleted file mode 100644 index 2913ed13b..000000000 --- a/pkg/local_object_storage/writecache/generic_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package writecache - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" -) - -func TestGeneric(t *testing.T) { - storagetest.TestAll(t, func(t *testing.T) storagetest.Component { - return New( - WithLogger(test.NewLogger(t)), - WithFlushWorkersCount(2), - WithPath(t.TempDir())) - }) -} diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go deleted file mode 100644 index c0847a65f..000000000 --- a/pkg/local_object_storage/writecache/get.go +++ /dev/null @@ -1,112 +0,0 @@ -package writecache - -import ( - "bytes" - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Get returns object from write-cache. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache. 
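-//
-// A hedged caller-side sketch (`wc` and the fallback read are illustrative,
-// not part of this package):
-//
-//	obj, err := wc.Get(ctx, addr)
-//	if err != nil {
-//		// e.g. apistatus.ObjectNotFound: fall back to the main storage
-//		obj, err = readFromBlobstor(ctx, addr) // hypothetical helper
-//	}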
-func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - saddr := addr.EncodeToString() - - ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Get", - trace.WithAttributes( - attribute.String("address", saddr), - )) - defer span.End() - - if !c.modeMtx.TryRLock() { - return nil, ErrNotInitialized - } - defer c.modeMtx.RUnlock() - if c.mode.NoMetabase() { - return nil, ErrDegraded - } - - obj, err := c.getInternal(ctx, addr) - return obj, metaerr.Wrap(err) -} - -func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - found := false - storageType := StorageTypeUndefined - startedAt := time.Now() - defer func() { - c.metrics.Get(time.Since(startedAt), found, storageType) - }() - - res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr}) - if err != nil { - return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - found = true - storageType = StorageTypeFSTree - return res.Object, nil -} - -// Head returns object header from write-cache. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache. -func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - saddr := addr.EncodeToString() - - ctx, span := tracing.StartSpanFromContext(ctx, "Head", - trace.WithAttributes( - attribute.String("address", saddr), - )) - defer span.End() - - if !c.modeMtx.TryRLock() { - return nil, ErrNotInitialized - } - defer c.modeMtx.RUnlock() - if c.mode.NoMetabase() { - return nil, ErrDegraded - } - - obj, err := c.getInternal(ctx, addr) - if err != nil { - return nil, metaerr.Wrap(err) - } - - return obj.CutPayload(), nil -} - -// Get fetches object from the underlying database. -// Key should be a stringified address. -// -// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in db. -func Get(db *bbolt.DB, key []byte) ([]byte, error) { - if db == nil { - return nil, ErrNotInitialized - } - var value []byte - err := db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(defaultBucket) - if b == nil { - return ErrNoDefaultBucket - } - value = b.Get(key) - if value == nil { - return logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - value = bytes.Clone(value) - return nil - }) - return value, metaerr.Wrap(err) -} diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go deleted file mode 100644 index e369fbd50..000000000 --- a/pkg/local_object_storage/writecache/iterate.go +++ /dev/null @@ -1,39 +0,0 @@ -package writecache - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" -) - -// ErrNoDefaultBucket is returned by IterateDB when default bucket for objects is missing. -var ErrNoDefaultBucket = errors.New("no default bucket") - -// IterateDB iterates over all objects stored in bbolt.DB instance and passes them to f until error return. -// It is assumed that db is an underlying database of some WriteCache instance. -// -// Returns ErrNoDefaultBucket if there is no default bucket in db. -// -// DB must not be nil and should be opened. 
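-//
-// A usage sketch (the collecting slice is illustrative):
-//
-//	var addrs []oid.Address
-//	err := IterateDB(db, func(addr oid.Address) error {
-//		addrs = append(addrs, addr)
-//		return nil
-//	})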
-func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
-	return metaerr.Wrap(db.View(func(tx *bbolt.Tx) error {
-		b := tx.Bucket(defaultBucket)
-		if b == nil {
-			return ErrNoDefaultBucket
-		}
-
-		var addr oid.Address
-
-		return b.ForEach(func(k, _ []byte) error {
-			err := addr.DecodeString(string(k))
-			if err != nil {
-				return fmt.Errorf("parse object address: %w", err)
-			}
-
-			return f(addr)
-		})
-	}))
-}
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
deleted file mode 100644
index 0e020b36e..000000000
--- a/pkg/local_object_storage/writecache/limiter.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package writecache
-
-import (
-	"errors"
-	"sync"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-)
-
-var errLimiterClosed = errors.New("acquire failed: limiter closed")
-
-// flushLimiter is used to limit the total size of objects
-// being flushed to blobstore at the same time. This is a necessary
-// limitation so that the flushing process does not have
-// a strong impact on user requests.
-type flushLimiter struct {
-	count, size uint64
-	maxSize     uint64
-	cond        *sync.Cond
-	closed      bool
-}
-
-func newFlushLimiter(maxSize uint64) *flushLimiter {
-	return &flushLimiter{
-		maxSize: maxSize,
-		cond:    sync.NewCond(&sync.Mutex{}),
-	}
-}
-
-func (l *flushLimiter) acquire(size uint64) error {
-	l.cond.L.Lock()
-	defer l.cond.L.Unlock()
-
-	// it is allowed to overflow maxSize to allow flushing objects with size > maxSize
-	for l.count > 0 && l.size+size > l.maxSize && !l.closed {
-		l.cond.Wait()
-		if l.closed {
-			return errLimiterClosed
-		}
-	}
-	l.count++
-	l.size += size
-	return nil
-}
-
-func (l *flushLimiter) release(size uint64) {
-	l.cond.L.Lock()
-	defer l.cond.L.Unlock()
-
-	assert.True(l.size >= size, "flushLimiter: invalid size")
-	l.size -= size
-
-	assert.True(l.count > 0, "flushLimiter: invalid count")
-	l.count--
-
-	l.cond.Broadcast()
-}
-
-func (l *flushLimiter) close() {
-	l.cond.L.Lock()
-	defer l.cond.L.Unlock()
-
-	l.closed = true
-
-	l.cond.Broadcast()
-}
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
deleted file mode 100644
index 1ca3e1156..000000000
--- a/pkg/local_object_storage/writecache/limiter_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package writecache
-
-import (
-	"sync/atomic"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"golang.org/x/sync/errgroup"
-)
-
-func TestLimiter(t *testing.T) {
-	var maxSize uint64 = 10
-	var single uint64 = 3
-	l := newFlushLimiter(maxSize)
-	var currSize atomic.Int64
-	var eg errgroup.Group
-	for range 10_000 {
-		eg.Go(func() error {
-			defer l.release(single)
-			defer currSize.Add(-1)
-			require.NoError(t, l.acquire(single))
-			require.True(t, currSize.Add(1) <= 3)
-			return nil
-		})
-	}
-	require.NoError(t, eg.Wait())
-}
diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go
deleted file mode 100644
index e3641f85e..000000000
--- a/pkg/local_object_storage/writecache/metrics.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package writecache
-
-import (
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
-
-type StorageType string
-
-func (t StorageType) String() string {
-	return string(t)
-}
-
-const (
-	StorageTypeUndefined StorageType = "null"
-	StorageTypeDB        StorageType = "db"
-	StorageTypeFSTree    StorageType = "fstree"
-)
-
-type Metrics interface {
-	SetShardID(string)
-	Get(d time.Duration, success bool, st StorageType)
-	Delete(d time.Duration, success bool, st StorageType)
-	Put(d time.Duration, success bool, st StorageType)
-	Flush(success bool, st StorageType)
-	Evict(st StorageType)
-
-	SetEstimateSize(uint64)
-	SetMode(m mode.ComponentMode)
-	SetActualCounters(uint64)
-	SetPath(path string)
-	Close()
-}
-
-func DefaultMetrics() Metrics { return metricsStub{} }
-
-type metricsStub struct{}
-
-func (metricsStub) SetShardID(string) {}
-
-func (metricsStub) SetPath(string) {}
-
-func (metricsStub) Get(time.Duration, bool, StorageType) {}
-
-func (metricsStub) Delete(time.Duration, bool, StorageType) {}
-
-func (metricsStub) Put(time.Duration, bool, StorageType) {}
-
-func (metricsStub) SetEstimateSize(uint64) {}
-
-func (metricsStub) SetMode(mode.ComponentMode) {}
-
-func (metricsStub) SetActualCounters(uint64) {}
-
-func (metricsStub) Flush(bool, StorageType) {}
-
-func (metricsStub) Evict(StorageType) {}
-
-func (metricsStub) Close() {}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
deleted file mode 100644
index c491be60b..000000000
--- a/pkg/local_object_storage/writecache/mode.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package writecache
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"os"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-)
-
-type setModePrm struct {
-	ignoreErrors bool
-	shrink       bool
-}
-
-// SetMode sets write-cache mode of operation.
-// When shard is put in read-only mode all objects in memory are flushed to disk
-// and all background jobs are suspended.
-func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
-		trace.WithAttributes(
-			attribute.String("mode", m.String()),
-		))
-	defer span.End()
-
-	c.modeMtx.Lock()
-	defer c.modeMtx.Unlock()
-
-	err := c.setMode(ctx, m, setModePrm{ignoreErrors: true})
-	if err == nil {
-		c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
-	}
-	return err
-}
-
-// setMode applies new mode. Must be called with cache.modeMtx lock taken.
-func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error {
-	var err error
-	turnOffMeta := m.NoMetabase()
-
-	if turnOffMeta && !c.mode.NoMetabase() {
-		err = c.flush(ctx, prm.ignoreErrors)
-		if err != nil {
-			return err
-		}
-	}
-
-	if err := c.closeStorage(ctx, prm.shrink); err != nil {
-		return err
-	}
-
-	// Suspend producers to ensure there are no in-flight channel send operations.
-	// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
-	// guarantees that there are no in-flight operations.
- for len(c.flushCh) != 0 { - c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush) - time.Sleep(time.Second) - } - - if turnOffMeta { - c.mode = m - return nil - } - - if err = c.openStore(mode.ConvertToComponentModeDegraded(m)); err != nil { - return err - } - - c.mode = m - return nil -} - -func (c *cache) closeStorage(ctx context.Context, shrink bool) error { - if c.fsTree == nil { - return nil - } - if !shrink { - if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("close write-cache storage: %w", err) - } - return nil - } - - empty := true - _, err := c.fsTree.Iterate(ctx, common.IteratePrm{ - Handler: func(common.IterationElement) error { - return errIterationCompleted - }, - }) - if err != nil { - if errors.Is(err, errIterationCompleted) { - empty = false - } else { - return fmt.Errorf("check write-cache items: %w", err) - } - } - if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("close write-cache storage: %w", err) - } - if empty { - err := os.RemoveAll(c.path) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove write-cache files: %w", err) - } - } else { - c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty) - } - return nil -} - -// readOnly returns true if current mode is read-only. -// `c.modeMtx` must be taken. -func (c *cache) readOnly() bool { - return c.mode.ReadOnly() -} - -// noMetabase returns true if c is operating without the metabase. -// `c.modeMtx` must be taken. -func (c *cache) noMetabase() bool { - return c.mode.NoMetabase() -} diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go deleted file mode 100644 index 4fbadbc64..000000000 --- a/pkg/local_object_storage/writecache/mode_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package writecache - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/stretchr/testify/require" -) - -func TestMode(t *testing.T) { - t.Parallel() - wc := New( - WithLogger(test.NewLogger(t)), - WithFlushWorkersCount(2), - WithPath(t.TempDir())) - - require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly)) - require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Init(context.Background())) - require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Close(context.Background())) - - require.NoError(t, wc.Open(context.Background(), mode.Degraded)) - require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Init(context.Background())) - require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Close(context.Background())) -} diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go deleted file mode 100644 index a4f98ad06..000000000 --- a/pkg/local_object_storage/writecache/options.go +++ /dev/null @@ -1,146 +0,0 @@ -package writecache - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// Option represents write-cache configuration option. -type Option func(*options) - -type options struct { - log *logger.Logger - // path is a path to a directory for write-cache. - path string - // blobstor is the main persistent storage. - blobstor MainStorage - // metabase is the metabase instance. - metabase Metabase - // maxObjectSize is the maximum size of the object stored in the write-cache. 
-	maxObjectSize uint64
-	// workersCount is the number of workers flushing objects in parallel.
-	workersCount int
-	// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
-	// 1 GiB by default.
-	maxCacheSize uint64
-	// maxCacheCount is the maximum total count of all objects saved in cache.
-	// 0 (no limit) by default.
-	maxCacheCount uint64
-	// noSync is true iff FSTree allows unsynchronized writes.
-	noSync bool
-	// reportError is the function called when encountering disk errors in background workers.
-	reportError func(context.Context, string, error)
-	// metrics is the metrics implementation.
-	metrics Metrics
-	// disableBackgroundFlush is for testing purposes only.
-	disableBackgroundFlush bool
-	// flushSizeLimit is the total size limit for objects being flushed at the same time.
-	flushSizeLimit uint64
-	// qosLimiter is used to limit flush RPS.
-	qosLimiter qos.Limiter
-}
-
-// WithLogger sets logger.
-func WithLogger(log *logger.Logger) Option {
-	return func(o *options) {
-		o.log = log
-	}
-}
-
-// WithPath sets path to writecache db.
-func WithPath(path string) Option {
-	return func(o *options) {
-		o.path = path
-	}
-}
-
-// WithBlobstor sets main object storage.
-func WithBlobstor(bs MainStorage) Option {
-	return func(o *options) {
-		o.blobstor = bs
-	}
-}
-
-// WithMetabase sets metabase.
-func WithMetabase(db Metabase) Option {
-	return func(o *options) {
-		o.metabase = db
-	}
-}
-
-// WithMaxObjectSize sets maximum object size to be stored in write-cache.
-func WithMaxObjectSize(sz uint64) Option {
-	return func(o *options) {
-		if sz > 0 {
-			o.maxObjectSize = sz
-		}
-	}
-}
-
-// WithFlushWorkersCount sets the number of workers flushing objects in parallel.
-func WithFlushWorkersCount(c int) Option {
-	return func(o *options) {
-		if c > 0 {
-			o.workersCount = c
-		}
-	}
-}
-
-// WithMaxCacheSize sets maximum write-cache size in bytes.
-func WithMaxCacheSize(sz uint64) Option {
-	return func(o *options) {
-		o.maxCacheSize = sz
-	}
-}
-
-// WithMaxCacheCount sets maximum write-cache objects count.
-func WithMaxCacheCount(v uint64) Option {
-	return func(o *options) {
-		o.maxCacheCount = v
-	}
-}
-
-// WithNoSync sets an option to allow returning to caller on PUT before write is persisted.
-// Note that we use this flag for FSTree only and DO NOT use it for a bolt DB because
-// we cannot yet properly handle the corrupted database during the startup. This SHOULD NOT
-// be relied upon and may be changed in the future.
-func WithNoSync(noSync bool) Option {
-	return func(o *options) {
-		o.noSync = noSync
-	}
-}
-
-// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(context.Context, string, error)) Option {
-	return func(o *options) {
-		o.reportError = f
-	}
-}
-
-// WithMetrics sets metrics implementation.
-func WithMetrics(metrics Metrics) Option {
-	return func(o *options) {
-		o.metrics = metrics
-	}
-}
-
-// WithDisableBackgroundFlush disables background flush, for testing purposes only.
-func WithDisableBackgroundFlush() Option {
-	return func(o *options) {
-		o.disableBackgroundFlush = true
-	}
-}
-
-// WithFlushSizeLimit sets the flush size limit.
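-//
-// Options compose at construction time; a hedged sketch (the path and
-// size values are illustrative only):
-//
-//	wc := New(
-//		WithPath("/srv/frostfs/writecache"),
-//		WithMaxObjectSize(64<<20),   // cap single objects at 64 MiB
-//		WithFlushSizeLimit(128<<20), // at most 128 MiB being flushed at once
-//	)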
-func WithFlushSizeLimit(v uint64) Option {
-	return func(o *options) {
-		o.flushSizeLimit = v
-	}
-}
-
-// WithQoSLimiter sets the QoS limiter used to limit flush RPS.
-func WithQoSLimiter(l qos.Limiter) Option {
-	return func(o *options) {
-		o.qosLimiter = l
-	}
-}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
deleted file mode 100644
index 2fbf50913..000000000
--- a/pkg/local_object_storage/writecache/put.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package writecache
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// Put puts object to write-cache.
-//
-// Returns ErrReadOnly if write-cache is in R/O mode.
-// Returns ErrNotInitialized if write-cache has not been initialized yet.
-// Returns ErrOutOfSpace if saving an object leads to WC's size overflow.
-// Returns ErrBigObject if an object exceeds the maximum object size.
-func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
-		trace.WithAttributes(
-			attribute.String("address", prm.Address.EncodeToString()),
-			attribute.Bool("dont_compress", prm.DontCompress),
-		))
-	defer span.End()
-
-	startedAt := time.Now()
-	added := false
-	storageType := StorageTypeUndefined
-	defer func() {
-		c.metrics.Put(time.Since(startedAt), added, storageType)
-	}()
-
-	if !c.modeMtx.TryRLock() {
-		return common.PutRes{}, ErrNotInitialized
-	}
-	defer c.modeMtx.RUnlock()
-	if c.readOnly() {
-		return common.PutRes{}, ErrReadOnly
-	}
-	if c.noMetabase() {
-		return common.PutRes{}, ErrDegraded
-	}
-
-	sz := uint64(len(prm.RawData))
-	if sz > c.maxObjectSize {
-		return common.PutRes{}, ErrBigObject
-	}
-
-	storageType = StorageTypeFSTree
-	err := c.putBig(ctx, prm)
-	if err == nil {
-		added = true
-	}
-	return common.PutRes{}, metaerr.Wrap(err)
-}
-
-// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
-	if prm.RawData == nil { // foolproof: RawData should be marshalled by shard.
- data, err := prm.Object.Marshal() - if err != nil { - return fmt.Errorf("cannot marshal object: %w", err) - } - prm.RawData = data - } - size := uint64(len(prm.RawData)) - if !c.hasEnoughSpace(size) { - return ErrOutOfSpace - } - - _, err := c.fsTree.Put(ctx, prm) - if err != nil { - return err - } - - storagelog.Write(ctx, c.log, - storagelog.AddressField(prm.Address.EncodeToString()), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("fstree PUT"), - ) - // counter changed by fstree - c.estimateCacheSize() - - return nil -} diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go deleted file mode 100644 index fa224f5e0..000000000 --- a/pkg/local_object_storage/writecache/seal.go +++ /dev/null @@ -1,37 +0,0 @@ -package writecache - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -func (c *cache) Seal(ctx context.Context, prm SealPrm) error { - ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Seal", - trace.WithAttributes( - attribute.Bool("ignore_errors", prm.IgnoreErrors), - attribute.Bool("restore_mode", prm.RestoreMode), - )) - defer span.End() - - c.modeMtx.Lock() - defer c.modeMtx.Unlock() - - sourceMode := c.mode - // flush will be done by setMode - err := c.setMode(ctx, mode.DegradedReadOnly, setModePrm{ignoreErrors: prm.IgnoreErrors, shrink: prm.Shrink}) - if err != nil { - return err - } - c.metrics.SetMode(mode.ComponentDisabled) - if prm.RestoreMode { - err = c.setMode(ctx, sourceMode, setModePrm{ignoreErrors: prm.IgnoreErrors}) - if err == nil { - c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode)) - } - } - return err -} diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go deleted file mode 100644 index 7a52d3672..000000000 --- a/pkg/local_object_storage/writecache/state.go +++ /dev/null @@ -1,20 +0,0 @@ -package writecache - -func (c *cache) estimateCacheSize() (uint64, uint64) { - count, size := c.counter.CountSize() - c.metrics.SetEstimateSize(size) - c.metrics.SetActualCounters(count) - return count, size -} - -func (c *cache) hasEnoughSpace(objectSize uint64) bool { - count, size := c.estimateCacheSize() - if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { - return false - } - return c.maxCacheSize >= size+objectSize -} - -func (c *cache) initCounters() { - c.estimateCacheSize() -} diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go deleted file mode 100644 index e88566cdf..000000000 --- a/pkg/local_object_storage/writecache/storage.go +++ /dev/null @@ -1,57 +0,0 @@ -package writecache - -import ( - "context" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -func (c *cache) openStore(mod mode.ComponentMode) 
error { - err := util.MkdirAllX(c.path, os.ModePerm) - if err != nil { - return err - } - - c.fsTree = fstree.New( - fstree.WithPath(c.path), - fstree.WithPerm(os.ModePerm), - fstree.WithDepth(1), - fstree.WithDirNameLen(1), - fstree.WithNoSync(c.noSync), - fstree.WithFileCounter(c.counter), - ) - if err := c.fsTree.Open(mod); err != nil { - return fmt.Errorf("open FSTree: %w", err) - } - if err := c.fsTree.Init(); err != nil { - return fmt.Errorf("init FSTree: %w", err) - } - - return nil -} - -func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) { - _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size}) - if err != nil && !client.IsErrObjectNotFound(err) { - c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) - } else if err == nil { - storagelog.Write(ctx, c.log, - storagelog.AddressField(addr.EncodeToString()), - storagelog.StorageTypeField(wcStorageType), - storagelog.OpField("fstree DELETE"), - ) - c.metrics.Evict(StorageTypeFSTree) - // counter changed by fstree - c.estimateCacheSize() - } -} diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go deleted file mode 100644 index 5eb341ba4..000000000 --- a/pkg/local_object_storage/writecache/upgrade.go +++ /dev/null @@ -1,110 +0,0 @@ -package writecache - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" - "time" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.etcd.io/bbolt" -) - -const dbName = "small.bolt" - -var defaultBucket = []byte{0} - -func (c *cache) flushAndDropBBoltDB(ctx context.Context) error { - _, err := os.Stat(filepath.Join(c.path, dbName)) - if err != nil && os.IsNotExist(err) { - return nil - } - if err != nil { - return fmt.Errorf("check write-cache database existence: %w", err) - } - db, err := OpenDB(c.path, true, os.OpenFile) - if err != nil { - return fmt.Errorf("open write-cache database: %w", err) - } - defer func() { - _ = db.Close() - }() - - var last string - for { - batch, err := c.readNextDBBatch(db, last) - if err != nil { - return err - } - if len(batch) == 0 { - break - } - for _, item := range batch { - var obj objectSDK.Object - if err := obj.Unmarshal(item.data); err != nil { - return fmt.Errorf("unmarshal object from database: %w", err) - } - if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil { - return fmt.Errorf("flush object from database: %w", err) - } - } - last = batch[len(batch)-1].address - } - if err := db.Close(); err != nil { - return fmt.Errorf("close write-cache database: %w", err) - } - if err := os.Remove(filepath.Join(c.path, dbName)); err != nil { - return fmt.Errorf("remove write-cache database: %w", err) - } - return nil -} - -type batchItem struct { - data []byte - address string -} - -func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) { - const batchSize = 100 - var batch []batchItem - err := db.View(func(tx *bbolt.Tx) error { - var addr oid.Address - - b := tx.Bucket(defaultBucket) - cs := b.Cursor() - for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() { - sa := string(k) - if sa == last { - continue - } - if err := addr.DecodeString(sa); err != nil { - return fmt.Errorf("decode address from database: %w", err) - } - - batch = append(batch, batchItem{data: bytes.Clone(data), address: sa}) - if len(batch) == batchSize { - return errIterationCompleted - } - } 
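-		// An assumed reading of the control flow (comment added for clarity):
-		// errIterationCompleted here only signals that a full batch was
-		// collected; the caller below converts it back into a successful
-		// return of the partial batch.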
-		return nil
-	})
-	if err == nil || errors.Is(err, errIterationCompleted) {
-		return batch, nil
-	}
-	return nil, err
-}
-
-// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
-	return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
-		NoFreelistSync: true,
-		ReadOnly:       ro,
-		Timeout:        100 * time.Millisecond,
-		OpenFile:       openFile,
-	})
-}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
deleted file mode 100644
index 7ed511318..000000000
--- a/pkg/local_object_storage/writecache/writecache.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package writecache
-
-import (
-	"context"
-	"errors"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
-	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// Info groups the information about write-cache.
-type Info struct {
-	// Full path to the write-cache.
-	Path string
-}
-
-type SealPrm struct {
-	IgnoreErrors bool
-	RestoreMode  bool
-	Shrink       bool
-}
-
-// Cache represents write-cache for objects.
-type Cache interface {
-	Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error)
-	Head(context.Context, oid.Address) (*objectSDK.Object, error)
-	// Delete removes object referenced by the given oid.Address from the
-	// Cache. Returns any error encountered that prevented the object from
-	// being removed.
-	//
-	// Returns apistatus.ObjectNotFound if object is missing in the Cache.
-	// Returns ErrReadOnly if the Cache is currently in the read-only mode.
-	Delete(context.Context, oid.Address) error
-	Put(context.Context, common.PutPrm) (common.PutRes, error)
-	SetMode(context.Context, mode.Mode) error
-	SetLogger(*logger.Logger)
-	DumpInfo() Info
-	Flush(context.Context, bool, bool) error
-	Seal(context.Context, SealPrm) error
-
-	Init(context.Context) error
-	Open(ctx context.Context, mode mode.Mode) error
-	Close(context.Context) error
-	GetMetrics() Metrics
-}
-
-// MainStorage is the interface of the underlying storage of Cache implementations.
-type MainStorage interface {
-	Compressor() *compression.Compressor
-	Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
-	Put(context.Context, common.PutPrm) (common.PutRes, error)
-}
-
-// Metabase is the interface of the metabase used by Cache implementations.
-type Metabase interface {
-	UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
-}
-
-var (
-	// ErrReadOnly is returned when Put/Write is performed in a read-only mode.
-	ErrReadOnly = logicerr.New("write-cache is in read-only mode")
-	// ErrDegraded is returned when writecache is in degraded mode.
-	ErrDegraded = logicerr.New("write-cache is in degraded mode")
-	// ErrNotInitialized is returned while the write-cache is still initializing.
-	ErrNotInitialized = logicerr.New("write-cache is not initialized yet")
-	// ErrBigObject is returned when object is too big to be placed in cache.
-	ErrBigObject = errors.New("too big object")
-	// ErrOutOfSpace is returned when there is no space left to put a new object.
-	ErrOutOfSpace = errors.New("no space left in the write cache")
-)
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
deleted file mode 100644
index 2849f3052..000000000
--- a/pkg/morph/client/actor.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package client
-
-import (
-	"github.com/google/uuid"
-	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
-	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
-	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type actorProvider interface {
-	GetActor() *actor.Actor
-	GetRPCActor() actor.RPCActor
-}
-
-// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
-// This leads to an invalidation of an rpc actor within Client. That means the
-// components that are initialized with the rpc actor may unintentionally use
-// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
-// this situation, getting the rpc actor from Client.
-type SwitchRPCGuardedActor struct {
-	actorProvider actorProvider
-}
-
-func NewSwitchRPCGuardedActor(c *Client) *SwitchRPCGuardedActor {
-	return &SwitchRPCGuardedActor{
-		actorProvider: c,
-	}
-}
-
-func (a *SwitchRPCGuardedActor) Call(contract util.Uint160, operation string, params ...any) (*result.Invoke, error) {
-	return a.actorProvider.GetActor().Call(contract, operation, params...)
-}
-
-func (a *SwitchRPCGuardedActor) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
-	return a.actorProvider.GetActor().CalculateNetworkFee(tx)
-}
-
-func (a *SwitchRPCGuardedActor) CalculateValidUntilBlock() (uint32, error) {
-	return a.actorProvider.GetActor().CalculateValidUntilBlock()
-}
-
-func (a *SwitchRPCGuardedActor) GetBlockCount() (uint32, error) {
-	return a.actorProvider.GetActor().GetBlockCount()
-}
-
-func (a *SwitchRPCGuardedActor) GetNetwork() netmode.Magic {
-	return a.actorProvider.GetActor().GetNetwork()
-}
-
-func (a *SwitchRPCGuardedActor) GetVersion() result.Version {
-	return a.actorProvider.GetActor().GetVersion()
-}
-
-func (a *SwitchRPCGuardedActor) MakeCall(contract util.Uint160, method string, params ...any) (*transaction.Transaction, error) {
-	return a.actorProvider.GetActor().MakeCall(contract, method, params...)
-}
-
-func (a *SwitchRPCGuardedActor) MakeRun(script []byte) (*transaction.Transaction, error) {
-	return a.actorProvider.GetActor().MakeRun(script)
-}
-
-func (a *SwitchRPCGuardedActor) MakeTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (*transaction.Transaction, error) {
-	return a.actorProvider.GetActor().MakeTunedCall(contract, method, attrs, txHook, params...)
-} - -func (a *SwitchRPCGuardedActor) MakeTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (*transaction.Transaction, error) { - return a.actorProvider.GetActor().MakeTunedRun(script, attrs, txHook) -} - -func (a *SwitchRPCGuardedActor) MakeUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (*transaction.Transaction, error) { - return a.actorProvider.GetActor().MakeUncheckedRun(script, sysfee, attrs, txHook) -} - -func (a *SwitchRPCGuardedActor) MakeUnsignedCall(contract util.Uint160, method string, attrs []transaction.Attribute, params ...any) (*transaction.Transaction, error) { - return a.actorProvider.GetActor().MakeUnsignedCall(contract, method, attrs, params...) -} - -func (a *SwitchRPCGuardedActor) MakeUnsignedRun(script []byte, attrs []transaction.Attribute) (*transaction.Transaction, error) { - return a.actorProvider.GetActor().MakeUnsignedRun(script, attrs) -} - -func (a *SwitchRPCGuardedActor) MakeUnsignedUncheckedRun(script []byte, sysFee int64, attrs []transaction.Attribute) (*transaction.Transaction, error) { - return a.actorProvider.GetActor().MakeUnsignedUncheckedRun(script, sysFee, attrs) -} - -func (a *SwitchRPCGuardedActor) Send(tx *transaction.Transaction) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().Send(tx) -} - -func (a *SwitchRPCGuardedActor) SendCall(contract util.Uint160, method string, params ...any) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SendCall(contract, method, params...) -} - -func (a *SwitchRPCGuardedActor) SendRun(script []byte) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SendRun(script) -} - -func (a *SwitchRPCGuardedActor) SendTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SendTunedCall(contract, method, attrs, txHook, params...) -} - -func (a *SwitchRPCGuardedActor) SendTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SendTunedRun(script, attrs, txHook) -} - -func (a *SwitchRPCGuardedActor) SendUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SendUncheckedRun(script, sysfee, attrs, txHook) -} - -func (a *SwitchRPCGuardedActor) Sender() util.Uint160 { - return a.actorProvider.GetActor().Sender() -} - -func (a *SwitchRPCGuardedActor) Sign(tx *transaction.Transaction) error { - return a.actorProvider.GetActor().Sign(tx) -} - -func (a *SwitchRPCGuardedActor) SignAndSend(tx *transaction.Transaction) (util.Uint256, uint32, error) { - return a.actorProvider.GetActor().SignAndSend(tx) -} - -func (a *SwitchRPCGuardedActor) CallAndExpandIterator(contract util.Uint160, method string, maxItems int, params ...any) (*result.Invoke, error) { - return a.actorProvider.GetActor().CallAndExpandIterator(contract, method, maxItems, params...) 
-} - -func (a *SwitchRPCGuardedActor) TerminateSession(sessionID uuid.UUID) error { - return a.actorProvider.GetActor().TerminateSession(sessionID) -} - -func (a *SwitchRPCGuardedActor) TraverseIterator(sessionID uuid.UUID, iterator *result.Iterator, num int) ([]stackitem.Item, error) { - return a.actorProvider.GetActor().TraverseIterator(sessionID, iterator, num) -} - -func (a *SwitchRPCGuardedActor) GetRPCActor() actor.RPCActor { - return a.actorProvider.GetRPCActor() -} - -func (a *SwitchRPCGuardedActor) GetRPCInvoker() invoker.RPCInvoke { - return a.actorProvider.GetRPCActor() -} diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go deleted file mode 100644 index 4462daab4..000000000 --- a/pkg/morph/client/balance/balanceOf.go +++ /dev/null @@ -1,33 +0,0 @@ -package balance - -import ( - "context" - "fmt" - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// BalanceOf receives the amount of funds in the client's account -// through the Balance contract call, and returns it. -func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) { - h := id.ScriptHash() - - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(balanceOfMethod) - invokePrm.SetArgs(h) - - prms, err := c.client.TestInvoke(ctx, invokePrm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln) - } - - amount, err := client.BigIntFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err) - } - return amount, nil -} diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go deleted file mode 100644 index f4685b0ab..000000000 --- a/pkg/morph/client/balance/burn.go +++ /dev/null @@ -1,43 +0,0 @@ -package balance - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// BurnPrm groups parameters of Burn operation. -type BurnPrm struct { - to util.Uint160 - amount int64 - id []byte - - client.InvokePrmOptional -} - -// SetTo sets receiver. -func (b *BurnPrm) SetTo(to util.Uint160) { - b.to = to -} - -// SetAmount sets amount. -func (b *BurnPrm) SetAmount(amount int64) { - b.amount = amount -} - -// SetID sets ID. -func (b *BurnPrm) SetID(id []byte) { - b.id = id -} - -// Burn destroys funds from the account. -func (c *Client) Burn(ctx context.Context, p BurnPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(burnMethod) - prm.SetArgs(p.to, p.amount, p.id) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(ctx, prm) - return err -} diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go deleted file mode 100644 index 1dacb9574..000000000 --- a/pkg/morph/client/balance/client.go +++ /dev/null @@ -1,69 +0,0 @@ -package balance - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS Balance contract. -// -// Working client must be created via constructor New. 
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Client struct {
-	client *client.StaticClient // static Balance contract client
-}
-
-const (
-	transferXMethod = "transferX"
-	mintMethod      = "mint"
-	burnMethod      = "burn"
-	lockMethod      = "lock"
-	balanceOfMethod = "balanceOf"
-	decimalsMethod  = "decimals"
-)
-
-// NewFromMorph returns the wrapper instance from the raw morph client.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) {
-	o := defaultOpts()
-
-	for i := range opts {
-		opts[i](o)
-	}
-
-	staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
-	if err != nil {
-		return nil, fmt.Errorf("create 'balance' contract client: %w", err)
-	}
-
-	return &Client{
-		client: staticClient,
-	}, nil
-}
-
-// Option allows setting an optional
-// parameter of Wrapper.
-type Option func(*opts)
-
-type opts []client.StaticClientOption
-
-func defaultOpts() *opts {
-	return &opts{client.TryNotary()}
-}
-
-// AsAlphabet returns option to sign main TX
-// of notary requests with client's private
-// key.
-//
-// Considered to be used by IR nodes only.
-func AsAlphabet() Option {
-	return func(o *opts) {
-		*o = append(*o, client.AsAlphabet())
-	}
-}
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
deleted file mode 100644
index 57e61d62b..000000000
--- a/pkg/morph/client/balance/decimals.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package balance
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-// Decimals returns the decimal precision of currency transactions
-// through the Balance contract call.
-func (c *Client) Decimals(ctx context.Context) (uint32, error) {
-	invokePrm := client.TestInvokePrm{}
-	invokePrm.SetMethod(decimalsMethod)
-
-	prms, err := c.client.TestInvoke(ctx, invokePrm)
-	if err != nil {
-		return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
-	} else if ln := len(prms); ln != 1 {
-		return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
-	}
-
-	decimals, err := client.IntFromStackItem(prms[0])
-	if err != nil {
-		return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
-	}
-	return uint32(decimals), nil
-}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
deleted file mode 100644
index 83e8b0586..000000000
--- a/pkg/morph/client/balance/lock.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package balance
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// LockPrm groups parameters of Lock operation.
-type LockPrm struct {
-	id       []byte
-	user     util.Uint160
-	lock     util.Uint160
-	amount   int64
-	dueEpoch int64
-
-	client.InvokePrmOptional
-}
-
-// SetID sets ID.
-func (l *LockPrm) SetID(id []byte) {
-	l.id = id
-}
-
-// SetUser sets user.
-func (l *LockPrm) SetUser(user util.Uint160) {
-	l.user = user
-}
-
-// SetLock sets lock.
-func (l *LockPrm) SetLock(lock util.Uint160) {
-	l.lock = lock
-}
-
-// SetAmount sets amount.
-func (l *LockPrm) SetAmount(amount int64) {
-	l.amount = amount
-}
-
-// SetDueEpoch sets end of the lock.
-func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
-	l.dueEpoch = dueEpoch
-}
-
-// Lock locks funds on the user account.
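-//
-// A hedged call sketch (all identifiers and values are placeholders):
-//
-//	var prm LockPrm
-//	prm.SetID(lockID)
-//	prm.SetUser(userAcc)
-//	prm.SetLock(lockAcc)
-//	prm.SetAmount(500)
-//	prm.SetDueEpoch(42)
-//	err := c.Lock(ctx, prm)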
-func (c *Client) Lock(ctx context.Context, p LockPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(lockMethod) - prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(ctx, prm) - return err -} diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go deleted file mode 100644 index 082ade85e..000000000 --- a/pkg/morph/client/balance/mint.go +++ /dev/null @@ -1,43 +0,0 @@ -package balance - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// MintPrm groups parameters of Mint operation. -type MintPrm struct { - to util.Uint160 - amount int64 - id []byte - - client.InvokePrmOptional -} - -// SetTo sets receiver of the transfer. -func (m *MintPrm) SetTo(to util.Uint160) { - m.to = to -} - -// SetAmount sets amount of the transfer. -func (m *MintPrm) SetAmount(amount int64) { - m.amount = amount -} - -// SetID sets ID. -func (m *MintPrm) SetID(id []byte) { - m.id = id -} - -// Mint sends funds to the account. -func (c *Client) Mint(ctx context.Context, p MintPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(mintMethod) - prm.SetArgs(p.to, p.amount, p.id) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(ctx, prm) - return err -} diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go deleted file mode 100644 index 870bed166..000000000 --- a/pkg/morph/client/balance/transfer.go +++ /dev/null @@ -1,38 +0,0 @@ -package balance - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// TransferPrm groups parameters of TransferX method. -type TransferPrm struct { - Amount int64 - - From, To user.ID - - Details []byte - - client.InvokePrmOptional -} - -// TransferX transfers p.Amount of GASe-12 from p.From to p.To -// with details p.Details through direct smart contract call. 
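-//
-// A hedged usage sketch (the user IDs and details are placeholders):
-//
-//	err := c.TransferX(ctx, TransferPrm{
-//		Amount:  1_000_000_000_000, // 1 GAS at e-12 precision
-//		From:    payer,
-//		To:      payee,
-//		Details: []byte("settlement"),
-//	})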
-func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
-	from := p.From.ScriptHash()
-	to := p.To.ScriptHash()
-
-	prm := client.InvokePrm{}
-	prm.SetMethod(transferXMethod)
-	prm.SetArgs(from, to, p.Amount, p.Details)
-	prm.InvokePrmOptional = p.InvokePrmOptional
-
-	_, err := c.client.Invoke(ctx, prm)
-	if err != nil {
-		return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
-	}
-	return nil
-}
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
deleted file mode 100644
index aab058d27..000000000
--- a/pkg/morph/client/client.go
+++ /dev/null
@@ -1,583 +0,0 @@
-package client
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"math/big"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
-	morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"github.com/google/uuid"
-	lru "github.com/hashicorp/golang-lru/v2"
-	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
-	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
-	"go.uber.org/zap"
-)
-
-// Client is a wrapper over web socket neo-go client
-// that provides smart-contract invocation interface
-// and notification subscription functionality.
-//
-// On connection loss it tries to establish a new connection
-// to the next RPC node (if any). If no RPC node is available, it
-// switches to inactive mode: any RPC call immediately returns
-// the ErrConnectionLost error, and the notification channel
-// returned from Client.NotificationChannel is closed.
-//
-// Working client must be created via constructor New.
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Client struct {
-	cache cache
-
-	logger  *logger.Logger // logging component
-	metrics morphmetrics.Register
-
-	client   *rpcclient.WSClient // neo-go websocket client
-	rpcActor *actor.Actor        // neo-go RPC actor
-	gasToken *nep17.Token        // neo-go GAS token wrapper
-	rolemgmt *rolemgmt.Contract  // neo-go Designation contract wrapper
-	nnsHash  util.Uint160        // NNS contract hash
-
-	nnsReader *nnsClient.ContractReader // NNS contract wrapper
-
-	acc     *wallet.Account // neo account
-	accAddr util.Uint160    // account's address
-
-	notary *notaryInfo
-
-	cfg cfg
-
-	endpoints endpoints
-
-	// switchLock protects endpoints, inactive, and subscription-related fields.
-	// It is taken exclusively during endpoint switch and locked in shared mode
-	// on every normal call.
-	switchLock sync.RWMutex
-
-	// channel for internal stop
-	closeChan chan struct{}
-	closed    atomic.Bool
-	wg        sync.WaitGroup
-
-	// indicates that Client is not able to
-	// establish connection to any of the
-	// provided RPC endpoints
-	inactive bool
-
-	// indicates that Client has already started
-	// goroutine that tries to switch to the higher
-	// priority RPC node
-	switchIsActive atomic.Bool
-}
-
-type cache struct {
-	m sync.RWMutex
-
-	gKey      *keys.PublicKey
-	txHeights *lru.Cache[util.Uint256, uint32]
-
-	metrics metrics.MorphCacheMetrics
-}
-
-func (c *cache) groupKey() *keys.PublicKey {
-	c.m.RLock()
-	defer c.m.RUnlock()
-
-	return c.gKey
-}
-
-func (c *cache) setGroupKey(groupKey *keys.PublicKey) {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	c.gKey = groupKey
-}
-
-func (c *cache) invalidate() {
-	c.m.Lock()
-	defer c.m.Unlock()
-
-	c.gKey = nil
-	c.txHeights.Purge()
-}
-
-var (
-	// ErrNilClient is returned by functions that expect
-	// a non-nil Client pointer, but received nil.
-	ErrNilClient = errors.New("client is nil")
-
-	// ErrConnectionLost is returned when client lost web socket connection
-	// to the RPC node and has not been able to establish a new one since.
-	ErrConnectionLost = errors.New("connection to the RPC node has been lost")
-)
-
-// HaltState is returned if the TestInvoke function completed without panic.
-const HaltState = "HALT"
-
-type notHaltStateError struct {
-	state, exception string
-}
-
-func (e *notHaltStateError) Error() string {
-	return fmt.Sprintf(
-		"chain/client: contract execution finished with state %s; exception: %s",
-		e.state,
-		e.exception,
-	)
-}
-
-// Invoke invokes contract method by sending transaction into blockchain.
-// Returns the valid-until-block value.
-// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
-	start := time.Now()
-	success := false
-	defer func() {
-		c.metrics.ObserveInvoke("Invoke", contract.String(), method, success, time.Since(start))
-	}()
-
-	c.switchLock.RLock()
-	defer c.switchLock.RUnlock()
-
-	if c.inactive {
-		return InvokeRes{}, ErrConnectionLost
-	}
-
-	txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
-	if err != nil {
-		return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
-	}
-
-	c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
-		zap.String("method", method),
-		zap.Uint32("vub", vub),
-		zap.Stringer("tx_hash", txHash.Reverse()))
-
-	success = true
-	return InvokeRes{Hash: txHash, VUB: vub}, nil
-}
-
-// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
-// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
-// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
-// The default batchSize is 100, the default limit from neo-go.
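-//
-// An iteration sketch (the contract hash, method name, and collecting
-// slice are placeholders):
-//
-//	var items []stackitem.Item
-//	err := c.TestInvokeIterator(func(it stackitem.Item) error {
-//		items = append(items, it)
-//		return nil
-//	}, 100, contractHash, "listItems")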
-// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
-// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
-// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
-// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
-    start := time.Now()
-    success := false
-    defer func() {
-        c.metrics.ObserveInvoke("TestInvokeIterator", contract.String(), method, success, time.Since(start))
-    }()
-
-    if batchSize <= 0 {
-        batchSize = invoker.DefaultIteratorResultItems
-    }
-
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return ErrConnectionLost
-    }
-
-    script, err := smartcontract.CreateCallAndPrefetchIteratorScript(contract, method, batchSize, args...)
-    if err != nil {
-        return err
-    }
-
-    val, err := c.rpcActor.Run(script)
-    if err != nil {
-        return err
-    } else if val.State != HaltState {
-        return &notHaltStateError{state: val.State, exception: val.FaultException}
-    }
-
-    arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
-    if err != nil {
-        return err
-    }
-    for i := range arr {
-        if err := cb(arr[i]); err != nil {
-            return err
-        }
-    }
-    if (sid == uuid.UUID{}) {
-        success = true
-        return nil
-    }
-
-    defer func() {
-        _ = c.rpcActor.TerminateSession(sid)
-    }()
-
-    // Batch size for TraverseIterator() can be restricted on the server side.
-    traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
-    for {
-        items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
-        if err != nil {
-            return err
-        }
-
-        for i := range items {
-            if err := cb(items[i]); err != nil {
-                return err
-            }
-        }
-        if len(items) < traverseBatchSize {
-            break
-        }
-    }
-    success = true
-    return nil
-}
-
-// TestInvoke invokes a contract method locally on the neo-go node. This method
-// should be used to read data from the smart contract.
-func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (res []stackitem.Item, err error) {
-    start := time.Now()
-    success := false
-    defer func() {
-        c.metrics.ObserveInvoke("TestInvoke", contract.String(), method, success, time.Since(start))
-    }()
-
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return nil, ErrConnectionLost
-    }
-
-    val, err := c.rpcActor.Call(contract, method, args...)
-    if err != nil {
-        return nil, err
-    }
-
-    if val.State != HaltState {
-        return nil, &notHaltStateError{state: val.State, exception: val.FaultException}
-    }
-
-    success = true
-    return val.Stack, nil
-}
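TestInvokeIterator hides the session bookkeeping behind a callback, which keeps call sites short. A minimal sketch, assuming a *client.Client from the constructor usage above and a hypothetical iterator-returning method named "iterateValues":

```go
package main

import (
	"errors"
	"fmt"

	morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// collectItems drains a hypothetical "iterateValues" iterator into a slice.
func collectItems(cli *morph.Client, contract util.Uint160) ([]stackitem.Item, error) {
	var collected []stackitem.Item

	err := cli.TestInvokeIterator(func(item stackitem.Item) error {
		// Returning a non-nil error here closes the session and
		// aborts the traversal with that error.
		collected = append(collected, item)
		return nil
	}, 512, contract, "iterateValues")
	if errors.Is(err, unwrap.ErrNoSessionID) {
		// The RPC node has iterator sessions disabled; callers such as
		// IterateContainersOf below fall back to a plain list method here.
		return nil, fmt.Errorf("iterator sessions unsupported: %w", err)
	}
	if err != nil {
		return nil, err
	}
	return collected, nil
}
```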
-// TransferGas transfers GAS from the local wallet to the receiver.
-func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return ErrConnectionLost
-    }
-
-    txHash, vub, err := c.gasToken.Transfer(c.accAddr, receiver, big.NewInt(int64(amount)), nil)
-    if err != nil {
-        return err
-    }
-
-    c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
-        zap.String("to", receiver.StringLE()),
-        zap.Stringer("tx_hash", txHash.Reverse()),
-        zap.Uint32("vub", vub))
-
-    return nil
-}
-
-func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return ErrConnectionLost
-    }
-
-    transferParams := make([]nep17.TransferParameters, len(receivers))
-    receiversLog := make([]string, len(receivers))
-
-    for i, receiver := range receivers {
-        transferParams[i] = nep17.TransferParameters{
-            From: c.accAddr,
-            To: receiver,
-            Amount: big.NewInt(int64(amount)),
-            Data: nil,
-        }
-        receiversLog[i] = receiver.StringLE()
-    }
-
-    txHash, vub, err := c.gasToken.MultiTransfer(transferParams)
-    if err != nil {
-        return err
-    }
-
-    c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
-        zap.Strings("to", receiversLog),
-        zap.Stringer("tx_hash", txHash.Reverse()),
-        zap.Uint32("vub", vub))
-
-    return nil
-}
-
-// Wait blocks routine execution until there
-// are `n` new blocks in the chain.
-//
-// Returns only connection errors.
-func (c *Client) Wait(ctx context.Context, n uint32) error {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return ErrConnectionLost
-    }
-
-    var (
-        err error
-        height, newHeight uint32
-    )
-
-    height, err = c.rpcActor.GetBlockCount()
-    if err != nil {
-        c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
-            zap.Error(err))
-        return nil
-    }
-
-    for {
-        select {
-        case <-ctx.Done():
-            return ctx.Err()
-        default:
-        }
-
-        newHeight, err = c.rpcActor.GetBlockCount()
-        if err != nil {
-            c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
-                zap.Error(err))
-            return nil
-        }
-
-        if newHeight >= height+n {
-            return nil
-        }
-
-        time.Sleep(c.cfg.waitInterval)
-    }
-}
-
-// GasBalance returns GAS amount in the client's wallet.
-func (c *Client) GasBalance() (res int64, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return 0, ErrConnectionLost
-    }
-
-    bal, err := c.gasToken.BalanceOf(c.accAddr)
-    if err != nil {
-        return 0, err
-    }
-
-    return bal.Int64(), nil
-}
-
-// Committee returns keys of chain committee from neo native contract.
-func (c *Client) Committee() (res keys.PublicKeys, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return nil, ErrConnectionLost
-    }
-
-    return c.client.GetCommittee()
-}
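Wait pairs naturally with Invoke and the transaction-status helpers that follow: after submitting a transaction, a caller typically waits for a few blocks and then checks that the execution halted. A hedged sketch of that pattern (the contract hash and method name are placeholders):

```go
package main

import (
	"context"
	"fmt"

	morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// invokeAndConfirm submits a hypothetical state-changing call, waits for two
// new blocks and verifies that the transaction finished in HALT state via
// TxHalt (defined just below in client.go).
func invokeAndConfirm(ctx context.Context, cli *morph.Client, contract util.Uint160) error {
	res, err := cli.Invoke(ctx, contract, fixedn.Fixed8(0), "someMethod")
	if err != nil {
		return err
	}

	// Wait polls GetBlockCount until the chain grows by n blocks.
	if err := cli.Wait(ctx, 2); err != nil {
		return err
	}

	ok, err := cli.TxHalt(res.Hash)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("transaction %s finished in FAULT state", res.Hash.StringLE())
	}
	return nil
}
```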
-// TxHalt returns true if transaction has been successfully executed and persisted.
-func (c *Client) TxHalt(h util.Uint256) (res bool, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return false, ErrConnectionLost
-    }
-
-    trig := trigger.Application
-    aer, err := c.client.GetApplicationLog(h, &trig)
-    if err != nil {
-        return false, err
-    }
-    return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil
-}
-
-func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return nil, ErrConnectionLost
-    }
-
-    return c.client.GetApplicationLog(hash, trig)
-}
-
-func (c *Client) GetVersion() (*result.Version, error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return nil, ErrConnectionLost
-    }
-
-    return c.client.GetVersion()
-}
-
-// TxHeight returns the height of the block in which the given transaction was persisted.
-func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return 0, ErrConnectionLost
-    }
-
-    return c.client.GetTransactionHeight(h)
-}
-
-// NeoFSAlphabetList returns keys that are stored in the NeoFS Alphabet role. The main chain
-// stores alphabet node keys of the inner ring there, while the sidechain stores both
-// alphabet and non-alphabet node keys of the inner ring.
-func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return nil, ErrConnectionLost
-    }
-
-    list, err := c.roleList(noderoles.NeoFSAlphabet)
-    if err != nil {
-        return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
-    }
-
-    return list, nil
-}
-
-// GetDesignateHash returns hash of the native `RoleManagement` contract.
-func (c *Client) GetDesignateHash() util.Uint160 {
-    return rolemgmt.Hash
-}
-
-func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
-    height, err := c.rpcActor.GetBlockCount()
-    if err != nil {
-        return nil, fmt.Errorf("get chain height: %w", err)
-    }
-
-    return c.rolemgmt.GetDesignatedByRole(r, height)
-}
-
-// MagicNumber returns the magic number of the network
-// to which the underlying RPC node client is connected.
-func (c *Client) MagicNumber() (uint64, error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return 0, ErrConnectionLost
-    }
-
-    return uint64(c.rpcActor.GetNetwork()), nil
-}
-
-// BlockCount returns block count of the network
-// to which the underlying RPC node client is connected.
-func (c *Client) BlockCount() (res uint32, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return 0, ErrConnectionLost
-    }
-
-    return c.rpcActor.GetBlockCount()
-}
-
-// MsPerBlock returns MillisecondsPerBlock network parameter.
-func (c *Client) MsPerBlock() (res int64, err error) {
-    c.switchLock.RLock()
-    defer c.switchLock.RUnlock()
-
-    if c.inactive {
-        return 0, ErrConnectionLost
-    }
-
-    v := c.rpcActor.GetVersion()
-
-    return int64(v.Protocol.MillisecondsPerBlock), nil
-}
-
-// IsValidScript returns true if invocation script executes with HALT state.
-func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return false, ErrConnectionLost - } - - res, err := c.client.InvokeScript(script, signers) - if err != nil { - return false, fmt.Errorf("invokeScript: %w", err) - } - - return res.State == vmstate.Halt.String(), nil -} - -func (c *Client) Metrics() morphmetrics.Register { - return c.metrics -} - -func (c *Client) setActor(act *actor.Actor) { - c.rpcActor = act - c.gasToken = nep17.New(act, gas.Hash) - c.rolemgmt = rolemgmt.New(act) - c.nnsReader = nnsClient.NewReader(act, c.nnsHash) -} - -func (c *Client) GetActor() *actor.Actor { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - return c.rpcActor -} - -func (c *Client) GetRPCActor() actor.RPCActor { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - return c.client -} diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go deleted file mode 100644 index e4dcd0db7..000000000 --- a/pkg/morph/client/constructor.go +++ /dev/null @@ -1,319 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "net" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - lru "github.com/hashicorp/golang-lru/v2" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "go.uber.org/zap" -) - -// Option is a client configuration change function. -type Option func(*cfg) - -// Callback is a function that is going to be called -// on certain Client's state. -type Callback func() - -// groups the configurations with default values. -type cfg struct { - dialTimeout time.Duration // client dial timeout - - logger *logger.Logger // logging component - - metrics morphmetrics.Register - - waitInterval time.Duration - - signer *transaction.Signer - - endpoints []Endpoint - - inactiveModeCb Callback - - switchInterval time.Duration - - morphCacheMetrics metrics.MorphCacheMetrics - - dialerSource DialerSource -} - -const ( - defaultDialTimeout = 5 * time.Second - defaultWaitInterval = 500 * time.Millisecond -) - -var ErrNoHealthyEndpoint = errors.New("no healthy endpoint") - -func defaultConfig() *cfg { - return &cfg{ - dialTimeout: defaultDialTimeout, - logger: logger.NewLoggerWrapper(zap.L()), - metrics: morphmetrics.NoopRegister{}, - waitInterval: defaultWaitInterval, - signer: &transaction.Signer{ - Scopes: transaction.Global, - }, - morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{}, - dialerSource: &noopDialerSource{}, - } -} - -// New creates, initializes and returns the Client instance. -// Notary support should be enabled with EnableNotarySupport client -// method separately. -// -// If private key is nil, it panics. -// -// Other values are set according to provided options, or by default: -// - client context: Background; -// - dial timeout: 5s; -// - blockchain network type: netmode.PrivNet; -// - signer with the global scope; -// - wait interval: 500ms; -// - logger: &logger.Logger{Logger: zap.L()}. 
-// - metrics: metrics.NoopRegister -// -// If desired option satisfies the default value, it can be omitted. -// If multiple options of the same config value are supplied, -// the option with the highest index in the arguments will be used. -// If the list of endpoints provided - uses first alive. -// If there are no healthy endpoint - returns ErrNoHealthyEndpoint. -func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, error) { - if key == nil { - panic("empty private key") - } - - acc := wallet.NewAccountFromPrivateKey(key) - accAddr := key.GetScriptHash() - - // build default configuration - cfg := defaultConfig() - - // apply options - for _, opt := range opts { - opt(cfg) - } - - if len(cfg.endpoints) == 0 { - return nil, errors.New("no endpoints were provided") - } - - cli := &Client{ - cache: newClientCache(cfg.morphCacheMetrics), - logger: cfg.logger, - metrics: cfg.metrics, - acc: acc, - accAddr: accAddr, - cfg: *cfg, - closeChan: make(chan struct{}), - } - - cli.endpoints.init(cfg.endpoints) - - var err error - var act *actor.Actor - var endpoint Endpoint - for cli.endpoints.curr, endpoint = range cli.endpoints.list { - cli.client, act, err = cli.newCli(ctx, endpoint) - if err != nil { - cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint, - zap.Error(err), zap.String("endpoint", endpoint.Address)) - } else { - cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint, - zap.String("endpoint", endpoint.Address)) - if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 { - cli.switchIsActive.Store(true) - go cli.switchToMostPrioritized(ctx) - } - break - } - } - if cli.client == nil { - return nil, ErrNoHealthyEndpoint - } - cs, err := cli.client.GetContractStateByID(nnsContractID) - if err != nil { - return nil, fmt.Errorf("resolve nns hash: %w", err) - } - cli.nnsHash = cs.Hash - cli.setActor(act) - - go cli.closeWaiter(ctx) - - return cli, nil -} - -func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSClient, *actor.Actor, error) { - cfg, err := endpoint.MTLSConfig.parse() - if err != nil { - return nil, nil, fmt.Errorf("read mtls certificates: %w", err) - } - cli, err := rpcclient.NewWS(ctx, endpoint.Address, rpcclient.WSOptions{ - Options: rpcclient.Options{ - DialTimeout: c.cfg.dialTimeout, - TLSClientConfig: cfg, - NetDialContext: c.cfg.dialerSource.NetContextDialer(), - }, - }) - if err != nil { - return nil, nil, fmt.Errorf("WS client creation: %w", err) - } - - defer func() { - if err != nil { - cli.Close() - } - }() - - err = cli.Init() - if err != nil { - return nil, nil, fmt.Errorf("WS client initialization: %w", err) - } - - act, err := newActor(cli, c.acc, c.cfg) - if err != nil { - return nil, nil, fmt.Errorf("RPC actor creation: %w", err) - } - - return cli, act, nil -} - -func newActor(ws *rpcclient.WSClient, acc *wallet.Account, cfg cfg) (*actor.Actor, error) { - return actor.New(ws, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: acc.PrivateKey().PublicKey().GetScriptHash(), - Scopes: cfg.signer.Scopes, - AllowedContracts: cfg.signer.AllowedContracts, - AllowedGroups: cfg.signer.AllowedGroups, - }, - Account: acc, - }}) -} - -func newClientCache(morphCacheMetrics metrics.MorphCacheMetrics) cache { - c, _ := lru.New[util.Uint256, uint32](100) // returns error only if size is negative - return cache{ - txHeights: c, - metrics: morphCacheMetrics, - } -} - -// WithDialTimeout returns a client constructor option -// that specifies neo-go client dial timeout duration. 
-// -// Ignores non-positive value. Has no effect if WithSingleClient -// is provided. -// -// If option not provided, 5s timeout is used. -func WithDialTimeout(dur time.Duration) Option { - return func(c *cfg) { - if dur > 0 { - c.dialTimeout = dur - } - } -} - -// WithLogger returns a client constructor option -// that specifies the component for writing log messages. -// -// Ignores nil value. -// -// If option not provided, &logger.Logger{Logger: zap.L()} is used. -func WithLogger(logger *logger.Logger) Option { - return func(c *cfg) { - if logger != nil { - c.logger = logger - } - } -} - -// WithMetrics returns a client constructor option -// that specifies the component for reporting metrics. -// -// Ignores nil value. -// -// If option not provided, NoopMetrics is used. -func WithMetrics(metrics morphmetrics.Register) Option { - return func(c *cfg) { - if metrics != nil { - c.metrics = metrics - } - } -} - -// WithSigner returns a client constructor option -// that specifies the signer and the scope of the transaction. -// -// Ignores nil value. -// -// If option not provided, signer with global scope is used. -func WithSigner(signer *transaction.Signer) Option { - return func(c *cfg) { - if signer != nil { - c.signer = signer - } - } -} - -// WithEndpoints returns a client constructor option -// that specifies additional Neo rpc endpoints. -func WithEndpoints(endpoints ...Endpoint) Option { - return func(c *cfg) { - c.endpoints = append(c.endpoints, endpoints...) - } -} - -// WithConnLostCallback return a client constructor option -// that specifies a callback that is called when Client -// unsuccessfully tried to connect to all the specified -// endpoints. -func WithConnLostCallback(cb Callback) Option { - return func(c *cfg) { - c.inactiveModeCb = cb - } -} - -// WithSwitchInterval returns a client constructor option -// that specifies a wait interval b/w attempts to reconnect -// to an RPC node with the highest priority. -func WithSwitchInterval(i time.Duration) Option { - return func(c *cfg) { - c.switchInterval = i - } -} - -func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option { - return func(c *cfg) { - c.morphCacheMetrics = morphCacheMetrics - } -} - -type DialerSource interface { - NetContextDialer() func(context.Context, string, string) (net.Conn, error) -} - -type noopDialerSource struct{} - -func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { - return nil -} - -func WithDialerSource(ds DialerSource) Option { - return func(c *cfg) { - c.dialerSource = ds - } -} diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go deleted file mode 100644 index be684619b..000000000 --- a/pkg/morph/client/container/client.go +++ /dev/null @@ -1,86 +0,0 @@ -package container - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS Container contract. -// -// Working client must be created via constructor New. -// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. 
-type Client struct { - client *client.StaticClient // static Container contract client -} - -const ( - putMethod = "put" - deleteMethod = "delete" - getMethod = "get" - listMethod = "list" - containersOfMethod = "containersOf" - deletionInfoMethod = "deletionInfo" - - // putNamedMethod is method name for container put with an alias. It is exported to provide custom fee. - putNamedMethod = "putNamed" -) - -var errNilArgument = errors.New("empty argument") - -// NewFromMorph returns the wrapper instance from the raw morph client. -// -// Specified fee is used for all operations by default. If WithCustomFeeForNamedPut is provided, -// the customized fee is used for Put operations with named containers. -func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) { - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - sc, err := client.NewStatic(cli, contract, fee, *o...) - if err != nil { - return nil, fmt.Errorf("create 'container' contract client: %w", err) - } - - return &Client{client: sc}, nil -} - -// Morph returns raw morph client. -func (c Client) Morph() *client.Client { - return c.client.Morph() -} - -// ContractAddress returns the address of the associated contract. -func (c Client) ContractAddress() util.Uint160 { - return c.client.ContractAddress() -} - -// Option allows to set an optional -// parameter of Wrapper. -type Option func(*opts) - -type opts []client.StaticClientOption - -func defaultOpts() *opts { - return &opts{client.TryNotary()} -} - -// AsAlphabet returns option to sign main TX -// of notary requests with client's private -// key. -// -// Considered to be used by IR nodes only. -func AsAlphabet() Option { - return func(o *opts) { - *o = append(*o, client.AsAlphabet()) - } -} diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go deleted file mode 100644 index 60fb8ad7c..000000000 --- a/pkg/morph/client/container/containers_of.go +++ /dev/null @@ -1,67 +0,0 @@ -package container - -import ( - "context" - "errors" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// ContainersOf returns a list of container identifiers belonging -// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers. -// -// If remote RPC does not support neo-go session API, fallback to List() method. -func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) { - var cidList []cid.ID - var err error - - cb := func(id cid.ID) error { - cidList = append(cidList, id) - return nil - } - if err = c.IterateContainersOf(ctx, idUser, cb); err != nil { - return nil, err - } - return cidList, nil -} - -// iterateContainers iterates over a list of container identifiers -// belonging to the specified user of FrostFS system and executes -// `cb` on each element. If idUser is nil, calls it on the list of all containers. 
-func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error { - var rawID []byte - if idUser != nil { - rawID = idUser.WalletBytes() - } - - itemCb := func(item stackitem.Item) error { - id, err := getCIDfromStackItem(item) - if err != nil { - return err - } - if err = cb(id); err != nil { - return err - } - return nil - } - - // We would like to have batch size as big as possible, - // to reduce the number of round-trips and avoid creating sessions. - // The limit depends on 2 things: - // 1. VM limits: max 2048 items on stack. - // 2. JSON encoded size for the item with type = 128k. - // It turns out, that for container ID the second limit is hit first, - // 512 is big enough value and it is beautiful. - const batchSize = 512 - - cnrHash := c.client.ContractAddress() - err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) - if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { - return c.iterate(ctx, idUser, cb) - } - - return err -} diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go deleted file mode 100644 index 09912efa5..000000000 --- a/pkg/morph/client/container/delete.go +++ /dev/null @@ -1,84 +0,0 @@ -package container - -import ( - "context" - "crypto/sha256" - "fmt" - - core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// Delete marshals container ID, and passes it to Wrapper's Delete method -// along with signature and session token. -// -// Returns error if container ID is nil. -func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error { - binCnr := make([]byte, sha256.Size) - witness.ContainerID.Encode(binCnr) - - var prm DeletePrm - - prm.SetCID(binCnr) - prm.SetSignature(witness.Signature.GetSign()) - prm.SetKey(witness.Signature.GetKey()) - - if tok := witness.SessionToken; tok != nil { - prm.SetToken(tok.Marshal()) - } - - _, err := c.Delete(ctx, prm) - return err -} - -// DeletePrm groups parameters of Delete client operation. -type DeletePrm struct { - cnr []byte - signature []byte - token []byte - key []byte - - client.InvokePrmOptional -} - -// SetCID sets container ID. -func (d *DeletePrm) SetCID(cid []byte) { - d.cnr = cid -} - -// SetSignature sets signature. -func (d *DeletePrm) SetSignature(signature []byte) { - d.signature = signature -} - -// SetToken sets session token. -func (d *DeletePrm) SetToken(token []byte) { - d.token = token -} - -// SetKey sets public key. -func (d *DeletePrm) SetKey(key []byte) { - d.key = key -} - -// Delete removes the container from FrostFS system -// through Container contract call. -// -// Returns valid until block and any error encountered that caused -// the removal to interrupt. 
-func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { - if len(p.signature) == 0 && !p.IsControl() { - return 0, errNilArgument - } - - prm := client.InvokePrm{} - prm.SetMethod(deleteMethod) - prm.SetArgs(p.cnr, p.signature, p.key, p.token) - prm.InvokePrmOptional = p.InvokePrmOptional - - res, err := c.client.Invoke(ctx, prm) - if err != nil { - return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err) - } - return res.VUB, nil -} diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go deleted file mode 100644 index 90bcdd7d5..000000000 --- a/pkg/morph/client/container/deletion_info.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "context" - "crypto/sha256" - "fmt" - "strings" - - containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/mr-tron/base58" -) - -func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) { - return DeletionInfo(ctx, (*Client)(x), cnr) -} - -type deletionInfo interface { - DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) -} - -func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - return c.DeletionInfo(ctx, binCnr) -} - -func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(deletionInfoMethod) - prm.SetArgs(cid) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - if strings.Contains(err.Error(), containerContract.NotFoundError) { - return nil, new(apistatus.ContainerNotFound) - } - return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err) - } else if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln) - } - - arr, err := client.ArrayFromStackItem(res[0]) - if err != nil { - return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err) - } - - if len(arr) != 2 { - return nil, fmt.Errorf("unexpected container stack item count (%s): %d", deletionInfoMethod, len(arr)) - } - - rawOwner, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err) - } - - var owner user.ID - if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil { - return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err) - } - - epoch, err := client.BigIntFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err) - } - - return &containercore.DelInfo{ - Owner: owner, - Epoch: epoch.Uint64(), - }, nil -} diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go deleted file mode 100644 index 8622d2cdd..000000000 --- a/pkg/morph/client/container/get.go +++ /dev/null @@ -1,115 +0,0 @@ -package container - -import ( - "context" - "crypto/sha256" - "fmt" - "strings" - - containerContract 
"git.frostfs.info/TrueCloudLab/frostfs-contract/container" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -type containerSource Client - -func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) { - return Get(ctx, (*Client)(x), cnr) -} - -// AsContainerSource provides container Source interface -// from Wrapper instance. -func AsContainerSource(w *Client) containercore.Source { - return (*containerSource)(w) -} - -type getContainer interface { - Get(ctx context.Context, cid []byte) (*containercore.Container, error) -} - -// Get marshals container ID, and passes it to Wrapper's Get method. -func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - return c.Get(ctx, binCnr) -} - -// Get reads the container from FrostFS system by binary identifier -// through Container contract call. -// -// If an empty slice is returned for the requested identifier, -// storage.ErrNotFound error is returned. -func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(getMethod) - prm.SetArgs(cid) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - if strings.Contains(err.Error(), containerContract.NotFoundError) { - return nil, new(apistatus.ContainerNotFound) - } - return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err) - } else if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln) - } - - arr, err := client.ArrayFromStackItem(res[0]) - if err != nil { - return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err) - } - - if len(arr) != 4 { - return nil, fmt.Errorf("unexpected container stack item count (%s): %d", getMethod, len(arr)) - } - - cnrBytes, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err) - } - - sigBytes, err := client.BytesFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err) - } - - pub, err := client.BytesFromStackItem(arr[2]) - if err != nil { - return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err) - } - - tokBytes, err := client.BytesFromStackItem(arr[3]) - if err != nil { - return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err) - } - - var cnr containercore.Container - - if err := cnr.Value.Unmarshal(cnrBytes); err != nil { - // use other major version if there any - return nil, fmt.Errorf("unmarshal container: %w", err) - } - - if len(tokBytes) > 0 { - cnr.Session = new(session.Container) - - err = cnr.Session.Unmarshal(tokBytes) - if err != nil { - return nil, fmt.Errorf("unmarshal session token: %w", err) - } - } - - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(pub) - sigV2.SetSign(sigBytes) - sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256) - - err = cnr.Signature.ReadFromV2(sigV2) - return &cnr, err -} diff --git 
a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go deleted file mode 100644 index fc63d1beb..000000000 --- a/pkg/morph/client/container/list.go +++ /dev/null @@ -1,69 +0,0 @@ -package container - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// iterate iterates through a list of container identifiers belonging -// to the specified user of FrostFS system. The list is composed -// through Container contract call. -// -// Iterates through the identifiers of all FrostFS containers if pointer -// to user identifier is nil. -func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error { - var rawID []byte - - if idUser != nil { - rawID = idUser.WalletBytes() - } - - prm := client.TestInvokePrm{} - prm.SetMethod(listMethod) - prm.SetArgs(rawID) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return fmt.Errorf("test invoke (%s): %w", listMethod, err) - } else if ln := len(res); ln != 1 { - return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) - } - - res, err = client.ArrayFromStackItem(res[0]) - if err != nil { - return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) - } - - for i := range res { - id, err := getCIDfromStackItem(res[i]) - if err != nil { - return err - } - - if err = cb(id); err != nil { - return err - } - } - - return nil -} - -func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) { - rawID, err := client.BytesFromStackItem(item) - if err != nil { - return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) - } - - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return cid.ID{}, fmt.Errorf("decode container ID: %w", err) - } - return id, nil -} diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go deleted file mode 100644 index 3bb84eb87..000000000 --- a/pkg/morph/client/container/put.go +++ /dev/null @@ -1,123 +0,0 @@ -package container - -import ( - "context" - "fmt" - - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// Put marshals container, and passes it to Wrapper's Put method -// along with sig.Key() and sig.Sign(). -// -// Returns error if container is nil. -func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) { - data := cnr.Value.Marshal() - - d := container.ReadDomain(cnr.Value) - - var prm PutPrm - prm.SetContainer(data) - prm.SetName(d.Name()) - prm.SetZone(d.Zone()) - - if cnr.Session != nil { - prm.SetToken(cnr.Session.Marshal()) - } - - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var sigV2 refs.Signature - cnr.Signature.WriteToV2(&sigV2) - - prm.SetKey(sigV2.GetKey()) - prm.SetSignature(sigV2.GetSign()) - - err := c.Put(ctx, prm) - if err != nil { - return nil, err - } - - var id cid.ID - container.CalculateIDFromBinary(&id, data) - - return &id, nil -} - -// PutPrm groups parameters of Put operation. 
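The Prm struct below follows the same argument-bag pattern as DeletePrm earlier in this diff: setters populate the fields, and the wrapper picks putNamed over put when a domain name is set. A minimal sketch of the high-level flow, assuming cnr is an already-signed containercore.Container:

```go
package main

import (
	"context"
	"fmt"

	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// putContainer stores a signed container through the package-level Put
// wrapper and returns the identifier calculated from the binary container.
// cnr must already carry a valid signature (and session token, if any).
func putContainer(ctx context.Context, c *cntClient.Client, cnr containercore.Container) (*cid.ID, error) {
	id, err := cntClient.Put(ctx, c, cnr)
	if err != nil {
		return nil, fmt.Errorf("put container: %w", err)
	}
	return id, nil
}
```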
-type PutPrm struct {
-    cnr []byte
-    key []byte
-    sig []byte
-    token []byte
-    name string
-    zone string
-
-    client.InvokePrmOptional
-}
-
-// SetContainer sets container data.
-func (p *PutPrm) SetContainer(cnr []byte) {
-    p.cnr = cnr
-}
-
-// SetKey sets public key.
-func (p *PutPrm) SetKey(key []byte) {
-    p.key = key
-}
-
-// SetSignature sets signature.
-func (p *PutPrm) SetSignature(sig []byte) {
-    p.sig = sig
-}
-
-// SetToken sets session token.
-func (p *PutPrm) SetToken(token []byte) {
-    p.token = token
-}
-
-// SetName sets native name.
-func (p *PutPrm) SetName(name string) {
-    p.name = name
-}
-
-// SetZone sets zone.
-func (p *PutPrm) SetZone(zone string) {
-    p.zone = zone
-}
-
-// Put saves binary container with its session token, key and signature
-// in FrostFS system through Container contract call.
-//
-// Returns any error encountered that caused the saving to interrupt.
-func (c *Client) Put(ctx context.Context, p PutPrm) error {
-    if len(p.sig) == 0 || len(p.key) == 0 {
-        return errNilArgument
-    }
-
-    var (
-        method string
-        prm client.InvokePrm
-    )
-
-    if p.name != "" {
-        method = putNamedMethod
-        prm.SetArgs(p.cnr, p.sig, p.key, p.token, p.name, p.zone)
-    } else {
-        method = putMethod
-        prm.SetArgs(p.cnr, p.sig, p.key, p.token)
-    }
-
-    prm.SetMethod(method)
-    prm.InvokePrmOptional = p.InvokePrmOptional
-
-    _, err := c.client.Invoke(ctx, prm)
-    if err != nil {
-        return fmt.Errorf("invoke method (%s): %w", method, err)
-    }
-    return nil
-}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
deleted file mode 100644
index d3eba7639..000000000
--- a/pkg/morph/client/frostfs/cheque.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package frostfscontract
-
-import (
-    "context"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-    "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// ChequePrm groups parameters of Cheque operation.
-type ChequePrm struct {
-    id []byte
-    user util.Uint160
-    amount int64
-    lock util.Uint160
-
-    client.InvokePrmOptional
-}
-
-// SetID sets ID of the cheque.
-func (c *ChequePrm) SetID(id []byte) {
-    c.id = id
-}
-
-// SetUser sets user.
-func (c *ChequePrm) SetUser(user util.Uint160) {
-    c.user = user
-}
-
-// SetAmount sets amount.
-func (c *ChequePrm) SetAmount(amount int64) {
-    c.amount = amount
-}
-
-// SetLock sets lock.
-func (c *ChequePrm) SetLock(lock util.Uint160) {
-    c.lock = lock
-}
-
-// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
-    prm := client.InvokePrm{}
-    prm.SetMethod(chequeMethod)
-    prm.SetArgs(p.id, p.user, p.amount, p.lock)
-    prm.InvokePrmOptional = p.InvokePrmOptional
-
-    _, err := x.client.Invoke(ctx, prm)
-    return err
-}
-
-// AlphabetUpdatePrm groups parameters of AlphabetUpdate operation.
-type AlphabetUpdatePrm struct {
-    id []byte
-    pubs keys.PublicKeys
-
-    client.InvokePrmOptional
-}
-
-// SetID sets update ID.
-func (a *AlphabetUpdatePrm) SetID(id []byte) {
-    a.id = id
-}
-
-// SetPubs sets new alphabet public keys.
-func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
-    a.pubs = pubs
-}
-
-// AlphabetUpdate updates the list of alphabet nodes.
-func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error { - prm := client.InvokePrm{} - prm.SetMethod(alphabetUpdateMethod) - prm.SetArgs(p.id, p.pubs) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := x.client.Invoke(ctx, prm) - return err -} diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go deleted file mode 100644 index cd6a9849e..000000000 --- a/pkg/morph/client/frostfs/client.go +++ /dev/null @@ -1,76 +0,0 @@ -package frostfscontract - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS contract. -// -// Working client must be created via constructor New. -// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. -type Client struct { - client *client.StaticClient // static FrostFS contract client -} - -const ( - alphabetUpdateMethod = "alphabetUpdate" - chequeMethod = "cheque" -) - -// NewFromMorph wraps client to work with FrostFS contract. -func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) { - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) - if err != nil { - return nil, fmt.Errorf("create 'frostfs' contract client: %w", err) - } - - return &Client{client: sc}, nil -} - -// ContractAddress returns the address of the associated contract. -func (x *Client) ContractAddress() util.Uint160 { - return x.client.ContractAddress() -} - -// Option allows to set an optional -// parameter of ClientWrapper. -type Option func(*opts) - -type opts []client.StaticClientOption - -func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } -} - -// AsAlphabet returns option to sign main TX -// of notary requests with client's private -// key. -// -// Considered to be used by IR nodes only. -func AsAlphabet() Option { - return func(o *opts) { - *o = append(*o, client.AsAlphabet()) - } -} diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go deleted file mode 100644 index 61eb03f09..000000000 --- a/pkg/morph/client/frostfsid/client.go +++ /dev/null @@ -1,34 +0,0 @@ -package frostfsid - -import ( - "fmt" - - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS ID contract. -// -// Working client must be created via constructor New. -// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. -type Client struct { - client *client.StaticClient // static FrostFS ID contract client -} - -var _ frostfsidcore.SubjectProvider = (*Client)(nil) - -// NewFromMorph wraps client to work with FrostFS ID contract. 
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) { - sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet()) - if err != nil { - return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err) - } - - return &Client{client: sc}, nil -} diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go deleted file mode 100644 index 3a789672a..000000000 --- a/pkg/morph/client/frostfsid/subject.go +++ /dev/null @@ -1,74 +0,0 @@ -package frostfsid - -import ( - "context" - "fmt" - - frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -const ( - methodGetSubject = "getSubject" - methodGetSubjectExtended = "getSubjectExtended" -) - -func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(methodGetSubject) - prm.SetArgs(addr) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) - } - - structArr, err := checkStackItem(res) - if err != nil { - return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err) - } - - subj, err := frostfsidclient.ParseSubject(structArr) - if err != nil { - return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) - } - - return subj, nil -} - -func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(methodGetSubjectExtended) - prm.SetArgs(addr) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) - } - - structArr, err := checkStackItem(res) - if err != nil { - return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err) - } - - subj, err := frostfsidclient.ParseSubjectExtended(structArr) - if err != nil { - return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) - } - - return subj, nil -} - -func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error) { - if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", methodGetSubject, ln) - } - - structArr, err = client.ArrayFromStackItem(res[0]) - if err != nil { - return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err) - } - return -} diff --git a/pkg/morph/client/mtls.go b/pkg/morph/client/mtls.go deleted file mode 100644 index 3de51afe7..000000000 --- a/pkg/morph/client/mtls.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "crypto/tls" - - "github.com/nspcc-dev/neo-go/pkg/rpcclient" -) - -// MTLSConfig represents endpoint mTLS configuration. 
-type MTLSConfig struct { - TrustedCAList []string - KeyFile string - CertFile string -} - -func (m *MTLSConfig) parse() (*tls.Config, error) { - if m == nil { - return nil, nil - } - - return rpcclient.TLSClientConfig(m.TrustedCAList, m.CertFile, m.KeyFile) -} diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go deleted file mode 100644 index b9e39c25e..000000000 --- a/pkg/morph/client/multi.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "context" - "slices" - "sort" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "go.uber.org/zap" -) - -// Endpoint represents morph endpoint together with its priority. -type Endpoint struct { - Address string - Priority int - MTLSConfig *MTLSConfig -} - -type endpoints struct { - curr int - list []Endpoint -} - -func (e *endpoints) init(ee []Endpoint) { - sort.SliceStable(ee, func(i, j int) bool { - return ee[i].Priority < ee[j].Priority - }) - - e.curr = 0 - e.list = ee -} - -// SwitchRPC performs reconnection and returns true if it was successful. -func (c *Client) SwitchRPC(ctx context.Context) bool { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - c.client.Close() - - // Iterate endpoints in the order of decreasing priority. - for c.endpoints.curr = range c.endpoints.list { - newEndpoint := c.endpoints.list[c.endpoints.curr] - cli, act, err := c.newCli(ctx, newEndpoint) - if err != nil { - c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, - zap.String("endpoint", newEndpoint.Address), - zap.Error(err), - ) - - continue - } - - c.cache.invalidate() - - c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, - zap.String("endpoint", newEndpoint.Address)) - - c.client = cli - c.setActor(act) - - if c.cfg.switchInterval != 0 && !c.switchIsActive.Load() && - c.endpoints.list[c.endpoints.curr].Priority != c.endpoints.list[0].Priority { - c.switchIsActive.Store(true) - go c.switchToMostPrioritized(ctx) - } - - return true - } - - c.inactive = true - - if c.cfg.inactiveModeCb != nil { - c.cfg.inactiveModeCb() - } - return false -} - -func (c *Client) closeWaiter(ctx context.Context) { - c.wg.Add(1) - defer c.wg.Done() - select { - case <-ctx.Done(): - case <-c.closeChan: - } - _ = c.UnsubscribeAll() - c.close() -} - -func (c *Client) switchToMostPrioritized(ctx context.Context) { - t := time.NewTicker(c.cfg.switchInterval) - defer t.Stop() - defer c.switchIsActive.Store(false) - -mainLoop: - for { - select { - case <-ctx.Done(): - return - case <-t.C: - c.switchLock.RLock() - - endpointsCopy := slices.Clone(c.endpoints.list) - currPriority := c.endpoints.list[c.endpoints.curr].Priority - highestPriority := c.endpoints.list[0].Priority - - c.switchLock.RUnlock() - - if currPriority == highestPriority { - // already connected to - // the most prioritized - return - } - - for i, e := range endpointsCopy { - if currPriority == e.Priority { - // a switch will not increase the priority - continue mainLoop - } - - tryE := e.Address - - cli, act, err := c.newCli(ctx, e) - if err != nil { - c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode, - zap.String("endpoint", tryE), - zap.Error(err), - ) - continue - } - - c.switchLock.Lock() - - // higher priority node could have been - // connected in the other goroutine - if e.Priority >= c.endpoints.list[c.endpoints.curr].Priority { - cli.Close() - c.switchLock.Unlock() - return - } - - c.client.Close() - c.cache.invalidate() - c.client = cli - c.setActor(act) - c.endpoints.curr = i - - 
c.switchLock.Unlock() - - c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC, - zap.String("endpoint", tryE)) - - return - } - } - } -} - -// close closes notification channel and wrapped WS client. -func (c *Client) close() { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - c.client.Close() -} diff --git a/pkg/morph/client/multy_test.go b/pkg/morph/client/multy_test.go deleted file mode 100644 index 84a07b0a4..000000000 --- a/pkg/morph/client/multy_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package client - -import ( - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestInitEndpoints(t *testing.T) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - ee := make([]Endpoint, 100) - for i := range ee { - ee[i].Priority = r.Int() - } - - var eeInternal endpoints - eeInternal.init(ee) - - prevValue := eeInternal.list[0].Priority - - for _, e := range eeInternal.list { - require.True(t, prevValue <= e.Priority) - - prevValue = e.Priority - } -} diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go deleted file mode 100644 index de8afbfb5..000000000 --- a/pkg/morph/client/netmap/client.go +++ /dev/null @@ -1,90 +0,0 @@ -package netmap - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type NodeInfo = netmap.NodeInfo - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS Netmap contract. -// -// Working client must be created via constructor New. -// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. -type Client struct { - client *client.StaticClient // static Netmap contract client -} - -const ( - addPeerMethod = "addPeer" - configMethod = "config" - epochMethod = "epoch" - lastEpochBlockMethod = "lastEpochBlock" - innerRingListMethod = "innerRingList" - netMapCandidatesMethod = "netmapCandidates" - netMapMethod = "netmap" - newEpochMethod = "newEpoch" - setConfigMethod = "setConfig" - updateInnerRingMethod = "updateInnerRing" - snapshotMethod = "snapshot" - updateStateMethod = "updateState" - - epochSnapshotMethod = "snapshotByEpoch" - - configListMethod = "listConfig" -) - -// NewFromMorph returns the wrapper instance from the raw morph client. -func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) { - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) - if err != nil { - return nil, fmt.Errorf("create 'netmap' contract client: %w", err) - } - - return &Client{client: sc}, nil -} - -// Option allows to set an optional -// parameter of Wrapper. -type Option func(*opts) - -type opts []client.StaticClientOption - -func defaultOpts() *opts { - return &opts{client.TryNotary()} -} - -// AsAlphabet returns option to sign main TX -// of notary requests with client's private -// key. -// -// Considered to be used by IR nodes only. -func AsAlphabet() Option { - return func(o *opts) { - *o = append(*o, client.AsAlphabet()) - } -} - -// ContractAddress returns the address of the associated contract. -func (c Client) ContractAddress() util.Uint160 { - return c.client.ContractAddress() -} - -// Morph returns raw morph client. 
-func (c Client) Morph() *client.Client { - return c.client.Morph() -} diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go deleted file mode 100644 index 3f6aed506..000000000 --- a/pkg/morph/client/netmap/config.go +++ /dev/null @@ -1,300 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/bigint" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -const ( - MaxObjectSizeConfig = "MaxObjectSize" - MaxECParityCountConfig = "MaxECParityCount" - MaxECDataCountConfig = "MaxECDataCount" - EpochDurationConfig = "EpochDuration" - ContainerFeeConfig = "ContainerFee" - ContainerAliasFeeConfig = "ContainerAliasFee" - IrCandidateFeeConfig = "InnerRingCandidateFee" - WithdrawFeeConfig = "WithdrawFee" - HomomorphicHashingDisabledKey = "HomomorphicHashingDisabled" - MaintenanceModeAllowedConfig = "MaintenanceModeAllowed" -) - -// MaxObjectSize receives max object size configuration -// value through the Netmap contract call. -func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, MaxObjectSizeConfig) -} - -// EpochDuration returns number of sidechain blocks per one FrostFS epoch. -func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, EpochDurationConfig) -} - -// ContainerFee returns fee paid by container owner to each alphabet node -// for container registration. -func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerFeeConfig) -} - -// ContainerAliasFee returns additional fee paid by container owner to each -// alphabet node for container nice name registration. -func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerAliasFeeConfig) -} - -// HomomorphicHashDisabled returns global configuration value of homomorphic hashing -// settings. -// -// Returns (false, nil) if config key is not found in the contract. -func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { - return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey) -} - -// InnerRingCandidateFee returns global configuration value of fee paid by -// node to be in inner ring candidates list. -func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, IrCandidateFeeConfig) -} - -// WithdrawFee returns global configuration value of fee paid by user to -// withdraw assets from FrostFS contract. -func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, WithdrawFeeConfig) -} - -// MaintenanceModeAllowed reads admission of "maintenance" state from the -// FrostFS network configuration stored in the Sidechain. The admission means -// that storage nodes are allowed to switch their state to "maintenance". -// -// By default, maintenance state is disallowed. 
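The config getters in this file are thin typed wrappers over the generic config test invocation defined below them. A minimal sketch of reading a few network parameters, assuming nm is a *netmap.Client built with NewFromMorph:

```go
package main

import (
	"context"
	"fmt"

	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)

// printNetworkLimits reads a few global settings through the Netmap contract.
func printNetworkLimits(ctx context.Context, nm *nmClient.Client) error {
	maxObj, err := nm.MaxObjectSize(ctx)
	if err != nil {
		return fmt.Errorf("read MaxObjectSize: %w", err)
	}

	epochDur, err := nm.EpochDuration(ctx)
	if err != nil {
		return fmt.Errorf("read EpochDuration: %w", err)
	}

	homoOff, err := nm.HomomorphicHashDisabled(ctx)
	if err != nil {
		return fmt.Errorf("read homomorphic hashing flag: %w", err)
	}

	fmt.Printf("max object size: %d bytes, epoch duration: %d blocks, homomorphic hashing disabled: %t\n",
		maxObj, epochDur, homoOff)
	return nil
}
```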
-func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { - return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig) -} - -func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { - v, err := c.config(ctx, []byte(key)) - if err != nil { - return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) - } - - bi, err := v.TryInteger() - if err != nil { - return 0, err - } - return bi.Uint64(), nil -} - -// reads boolean value by the given key from the FrostFS network configuration -// stored in the Sidechain. Returns false if key is not presented. -func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { - v, err := c.config(ctx, []byte(key)) - if err != nil { - return false, fmt.Errorf("read netconfig value '%s': %w", key, err) - } - - return v.TryBool() -} - -// SetConfigPrm groups parameters of SetConfig operation. -type SetConfigPrm struct { - id []byte - key []byte - value any - - client.InvokePrmOptional -} - -// SetID sets ID of the config value. -func (s *SetConfigPrm) SetID(id []byte) { - s.id = id -} - -// SetKey sets key of the config value. -func (s *SetConfigPrm) SetKey(key []byte) { - s.key = key -} - -// SetValue sets value of the config value. -func (s *SetConfigPrm) SetValue(value any) { - s.value = value -} - -// SetConfig sets config field. -func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(setConfigMethod) - prm.SetArgs(p.id, p.key, p.value) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(ctx, prm) - return err -} - -// RawNetworkParameter is a FrostFS network parameter which is transmitted but -// not interpreted by the FrostFS API protocol. -type RawNetworkParameter struct { - // Name of the parameter. - Name string - - // Raw parameter value. - Value []byte -} - -// NetworkConfiguration represents FrostFS network configuration stored -// in the FrostFS Sidechain. -type NetworkConfiguration struct { - MaxObjectSize uint64 - - EpochDuration uint64 - - ContainerFee uint64 - - ContainerAliasFee uint64 - - IRCandidateFee uint64 - - WithdrawalFee uint64 - - HomomorphicHashingDisabled bool - - MaintenanceModeAllowed bool - - Raw []RawNetworkParameter -} - -// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain. 
-func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) { - var res NetworkConfiguration - prm := client.TestInvokePrm{} - prm.SetMethod(configListMethod) - - items, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return res, fmt.Errorf("test invoke (%s): %w", - configListMethod, err) - } - - if ln := len(items); ln != 1 { - return res, fmt.Errorf("unexpected stack item count (%s): %d", configListMethod, ln) - } - - arr, err := client.ArrayFromStackItem(items[0]) - if err != nil { - return res, fmt.Errorf("record list (%s): %w", configListMethod, err) - } - - m := make(map[string]struct{}, len(arr)) - res.Raw = make([]RawNetworkParameter, 0, len(arr)) - - err = iterateRecords(arr, func(name string, value []byte) error { - _, ok := m[name] - if ok { - return fmt.Errorf("duplicated config name %s", name) - } - - m[name] = struct{}{} - - switch name { - default: - res.Raw = append(res.Raw, RawNetworkParameter{ - Name: name, - Value: value, - }) - case MaxObjectSizeConfig: - res.MaxObjectSize = bytesToUint64(value) - case EpochDurationConfig: - res.EpochDuration = bytesToUint64(value) - case ContainerFeeConfig: - res.ContainerFee = bytesToUint64(value) - case ContainerAliasFeeConfig: - res.ContainerAliasFee = bytesToUint64(value) - case IrCandidateFeeConfig: - res.IRCandidateFee = bytesToUint64(value) - case WithdrawFeeConfig: - res.WithdrawalFee = bytesToUint64(value) - case HomomorphicHashingDisabledKey: - res.HomomorphicHashingDisabled = bytesToBool(value) - case MaintenanceModeAllowedConfig: - res.MaintenanceModeAllowed = bytesToBool(value) - } - - return nil - }) - - return res, err -} - -func bytesToUint64(val []byte) uint64 { - if len(val) == 0 { - return 0 - } - return bigint.FromBytes(val).Uint64() -} - -func bytesToBool(val []byte) bool { - for i := range val { - if val[i] != 0 { - return true - } - } - - return false -} - -// config performs the test invoke of get config value -// method of FrostFS Netmap contract. -// -// Returns ErrConfigNotFound if config key is not found in the contract. -func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(configMethod) - prm.SetArgs(key) - - items, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", - configMethod, err) - } - - if ln := len(items); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", - configMethod, ln) - } - - return items[0], nil -} - -// iterateRecords iterates over all config records and passes them to f. -// -// Returns f's errors directly. 
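For illustration, the stack layout consumed by ReadNetworkConfiguration above is an array of two-field structs, one per config record; a single record could be built and decoded like this, assuming the in-package helpers iterateRecords (defined just below) and bytesToUint64, plus the package's stackitem, bigint, big and fmt imports (the value is made up):

	rec := stackitem.NewStruct([]stackitem.Item{
		stackitem.NewByteArray([]byte(MaxObjectSizeConfig)),
		stackitem.NewByteArray(bigint.ToBytes(big.NewInt(67108864))), // 64 MiB
	})
	err := iterateRecords([]stackitem.Item{rec}, func(name string, value []byte) error {
		fmt.Println(name, bytesToUint64(value)) // MaxObjectSize 67108864
		return nil
	})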
-func iterateRecords(arr []stackitem.Item, f func(key string, value []byte) error) error { - for i := range arr { - fields, err := client.ArrayFromStackItem(arr[i]) - if err != nil { - return fmt.Errorf("record fields: %w", err) - } - - if ln := len(fields); ln != 2 { - return fmt.Errorf("unexpected record fields number: %d", ln) - } - - k, err := client.BytesFromStackItem(fields[0]) - if err != nil { - return fmt.Errorf("record key: %w", err) - } - - v, err := client.BytesFromStackItem(fields[1]) - if err != nil { - return fmt.Errorf("record value: %w", err) - } - - if err := f(string(k), v); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go deleted file mode 100644 index 8561329ec..000000000 --- a/pkg/morph/client/netmap/epoch.go +++ /dev/null @@ -1,57 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// Epoch receives number of current FrostFS epoch -// through the Netmap contract call. -func (c *Client) Epoch(ctx context.Context) (uint64, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(epochMethod) - - items, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return 0, fmt.Errorf("test invoke (%s): %w", - epochMethod, err) - } - - if ln := len(items); ln != 1 { - return 0, fmt.Errorf("unexpected stack item count (%s): %d", - epochMethod, ln) - } - - num, err := client.IntFromStackItem(items[0]) - if err != nil { - return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err) - } - return uint64(num), nil -} - -// LastEpochBlock receives block number of current FrostFS epoch -// through the Netmap contract call. -func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(lastEpochBlockMethod) - - items, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return 0, fmt.Errorf("test invoke (%s): %w", - lastEpochBlockMethod, err) - } - - if ln := len(items); ln != 1 { - return 0, fmt.Errorf("unexpected stack item count (%s): %d", - lastEpochBlockMethod, ln) - } - - block, err := client.IntFromStackItem(items[0]) - if err != nil { - return 0, fmt.Errorf("get number from stack item (%s): %w", - lastEpochBlockMethod, err) - } - return uint32(block), nil -} diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go deleted file mode 100644 index 0e1f9186b..000000000 --- a/pkg/morph/client/netmap/innerring.go +++ /dev/null @@ -1,97 +0,0 @@ -package netmap - -import ( - "context" - "crypto/elliptic" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// UpdateIRPrm groups parameters of UpdateInnerRing -// invocation. -type UpdateIRPrm struct { - keys keys.PublicKeys - - client.InvokePrmOptional -} - -// SetKeys sets new inner ring keys. -func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) { - u.keys = keys -} - -// UpdateInnerRing updates inner ring keys. -func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { - args := make([][]byte, len(p.keys)) - for i := range args { - args[i] = p.keys[i].Bytes() - } - - prm := client.InvokePrm{} - prm.SetMethod(updateInnerRingMethod) - prm.SetArgs(args) - prm.InvokePrmOptional = p.InvokePrmOptional - - _, err := c.client.Invoke(ctx, prm) - return err -} - -// GetInnerRingList returns the current IR list.
-func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(innerRingListMethod) - - prms, err := c.client.TestInvoke(ctx, invokePrm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) - } - - return irKeysFromStackItem(prms, innerRingListMethod) -} - -func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys, error) { - if ln := len(stack); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln) - } - - irs, err := client.ArrayFromStackItem(stack[0]) - if err != nil { - return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err) - } - - irKeys := make(keys.PublicKeys, len(irs)) - - for i := range irs { - irKeys[i], err = irKeyFromStackItem(irs[i]) - if err != nil { - return nil, err - } - } - - return irKeys, nil -} - -const irNodeFixedPrmNumber = 1 - -func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { - prms, err := client.ArrayFromStackItem(prm) - if err != nil { - return nil, fmt.Errorf("get stack item array (IRNode): %w", err) - } else if ln := len(prms); ln != irNodeFixedPrmNumber { - return nil, fmt.Errorf( - "unexpected stack item count (IRNode): expected %d, has %d", - irNodeFixedPrmNumber, - ln, - ) - } - - byteKey, err := client.BytesFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err) - } - - return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256()) -} diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go deleted file mode 100644 index 97782fc25..000000000 --- a/pkg/morph/client/netmap/netmap.go +++ /dev/null @@ -1,148 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - - netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and -// decodes netmap.NetMap from the response. -func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(epochSnapshotMethod) - invokePrm.SetArgs(epoch) - - res, err := c.client.TestInvoke(ctx, invokePrm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", - epochSnapshotMethod, err) - } - - nm, err := DecodeNetMap(res) - if err != nil { - return nil, err - } - - nm.SetEpoch(epoch) - - return nm, err -} - -// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo -// from the response. -func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(netMapCandidatesMethod) - - res, err := c.client.TestInvoke(ctx, invokePrm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) - } - - if len(res) > 0 { - return decodeNodeList(res[0]) - } - - return nil, nil -} - -// NetMap calls "netmap" method and decodes netmap.NetMap from the response.
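To make the IRNode decoding above concrete, here is a sketch with made-up data, written as if inside this package (innerRingListMethod is the package's own constant):

	priv, _ := keys.NewPrivateKey()
	node := stackitem.NewStruct([]stackitem.Item{
		stackitem.NewByteArray(priv.PublicKey().Bytes()), // the single fixed field: the key
	})
	list := stackitem.NewArray([]stackitem.Item{node})
	irKeys, err := irKeysFromStackItem([]stackitem.Item{list}, innerRingListMethod)
	// irKeys now holds one parsed P-256 public key.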
-func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(netMapMethod) - - res, err := c.client.TestInvoke(ctx, invokePrm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", - netMapMethod, err) - } - - return DecodeNetMap(res) -} - -func DecodeNetMap(resStack []stackitem.Item) (*netmap.NetMap, error) { - var nm netmap.NetMap - - if len(resStack) > 0 { - nodes, err := decodeNodeList(resStack[0]) - if err != nil { - return nil, err - } - - nm.SetNodes(nodes) - } - - return &nm, nil -} - -func decodeNodeList(itemNodes stackitem.Item) ([]netmap.NodeInfo, error) { - itemArrNodes, err := client.ArrayFromStackItem(itemNodes) - if err != nil { - return nil, fmt.Errorf("decode item array of nodes from the response item: %w", err) - } - - var nodes []netmap.NodeInfo - - if len(itemArrNodes) > 0 { - nodes = make([]netmap.NodeInfo, len(itemArrNodes)) - - for i := range itemArrNodes { - err = decodeNodeInfo(&nodes[i], itemArrNodes[i]) - if err != nil { - return nil, fmt.Errorf("decode node #%d: %w", i+1, err) - } - } - } - - return nodes, nil -} - -func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error { - nodeFields, err := client.ArrayFromStackItem(itemNode) - if err != nil { - return fmt.Errorf("decode item array of node fields: %w", err) - } - - var node netmapcontract.Node - - if len(nodeFields) > 0 { - node.BLOB, err = client.BytesFromStackItem(nodeFields[0]) - if err != nil { - return fmt.Errorf("decode node info BLOB: %w", err) - } - } - - node.State = netmapcontract.NodeStateOnline - - if len(nodeFields) > 1 { - state, err := client.IntFromStackItem(nodeFields[1]) - if err != nil { - return fmt.Errorf("decode integer from 2nd item: %w", err) - } - - node.State = netmapcontract.NodeState(state) - } - - err = dst.Unmarshal(node.BLOB) - if err != nil { - return fmt.Errorf("decode node info: %w", err) - } - - switch node.State { - default: - return fmt.Errorf("unsupported state %v", node.State) - case netmapcontract.NodeStateOnline: - dst.SetStatus(netmap.Online) - case netmapcontract.NodeStateOffline: - dst.SetStatus(netmap.Offline) - case netmapcontract.NodeStateMaintenance: - dst.SetStatus(netmap.Maintenance) - } - - return nil -} diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go deleted file mode 100644 index e686e271e..000000000 --- a/pkg/morph/client/netmap/netmap_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package netmap - -import ( - "crypto/rand" - "math/big" - "strconv" - "testing" - - netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func Test_stackItemsToNodeInfos(t *testing.T) { - expected := make([]netmap.NodeInfo, 4) - for i := range expected { - pub := make([]byte, 33) - rand.Read(pub) - - switch i % 3 { - default: - expected[i].SetStatus(netmap.Offline) - case int(netmapcontract.NodeStateOnline): - expected[i].SetStatus(netmap.Online) - case int(netmapcontract.NodeStateMaintenance): - expected[i].SetStatus(netmap.Maintenance) - } - - expected[i].SetPublicKey(pub) - - expected[i].SetAttribute("key", strconv.Itoa(i)) - } - - items := make([]stackitem.Item, 4) - for i := range items { - data := expected[i].Marshal() - - var state int64 - - switch expected[i].Status() { - case netmap.Online: - state = int64(netmapcontract.NodeStateOnline) - case netmap.Offline: - state 
= int64(netmapcontract.NodeStateOffline) - case netmap.Maintenance: - state = int64(netmapcontract.NodeStateMaintenance) - } - - items[i] = stackitem.NewStruct([]stackitem.Item{ - stackitem.NewByteArray(data), - stackitem.NewBigInteger(big.NewInt(state)), - }) - } - - actual, err := decodeNodeList(stackitem.NewArray(items)) - require.NoError(t, err) - require.Equal(t, expected, actual) -} diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go deleted file mode 100644 index 341b20935..000000000 --- a/pkg/morph/client/netmap/new_epoch.go +++ /dev/null @@ -1,40 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// NewEpoch updates FrostFS epoch number through -// Netmap contract call. -func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error { - prm := client.InvokePrm{} - prm.SetMethod(newEpochMethod) - prm.SetArgs(epoch) - - _, err := c.client.Invoke(ctx, prm) - if err != nil { - return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) - } - return nil -} - -// NewEpochControl updates FrostFS epoch number through -// control notary transaction internally to ensure all -// nodes produce the same transaction with high probability. -// If vub > 0, vub will be used as valid until block value. -func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) { - prm := client.InvokePrm{} - prm.SetMethod(newEpochMethod) - prm.SetArgs(epoch) - prm.SetControlTX(true) - prm.SetVUB(vub) - - res, err := c.client.Invoke(ctx, prm) - if err != nil { - return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) - } - return res.VUB, nil -} diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go deleted file mode 100644 index e83acde39..000000000 --- a/pkg/morph/client/netmap/peer.go +++ /dev/null @@ -1,66 +0,0 @@ -package netmap - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -var errFailedToRemovePeerWithoutNotary = errors.New("peer can be forcefully removed only in notary environment") - -// AddPeerPrm groups parameters of AddPeer operation. -type AddPeerPrm struct { - nodeInfo netmap.NodeInfo - - client.InvokePrmOptional -} - -// SetNodeInfo sets new peer NodeInfo. -func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) { - a.nodeInfo = nodeInfo -} - -// AddPeer registers peer in FrostFS network through -// Netmap contract call. -func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error { - method := addPeerMethod - - if c.client.WithNotary() && c.client.IsAlpha() { - // In notary environments Alphabet must call the AddPeerIR method instead of AddPeer. - // It differs from AddPeer only by name, so we can do this in the same form. - // See https://github.com/nspcc-dev/frostfs-contract/issues/154. - method += "IR" - } - - prm := client.InvokePrm{} - prm.SetMethod(method) - prm.SetArgs(p.nodeInfo.Marshal()) - prm.InvokePrmOptional = p.InvokePrmOptional - - if _, err := c.client.Invoke(ctx, prm); err != nil { - return fmt.Errorf("invoke method (%s): %w", method, err) - } - return nil -} - -// ForceRemovePeer marks the given peer as offline via a notary control transaction. -// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) { - if !c.client.WithNotary() { - return 0, errFailedToRemovePeerWithoutNotary - } - - prm := UpdatePeerPrm{} - prm.SetKey(nodeInfo.PublicKey()) - prm.SetControlTX(true) - prm.SetVUB(vub) - - res, err := c.UpdatePeerState(ctx, prm) - if err != nil { - return 0, fmt.Errorf("updating peer state: %w", err) - } - return res.VUB, nil -} diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go deleted file mode 100644 index 9dbec1a90..000000000 --- a/pkg/morph/client/netmap/snapshot.go +++ /dev/null @@ -1,23 +0,0 @@ -package netmap - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response. -func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(snapshotMethod) - prm.SetArgs(diff) - - res, err := c.client.TestInvoke(ctx, prm) - if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) - } - - return DecodeNetMap(res) -} diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go deleted file mode 100644 index f9f639c19..000000000 --- a/pkg/morph/client/netmap/update_state.go +++ /dev/null @@ -1,59 +0,0 @@ -package netmap - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// UpdatePeerPrm groups parameters of UpdatePeerState operation. -type UpdatePeerPrm struct { - key []byte - - state netmap.NodeState - - client.InvokePrmOptional -} - -// SetKey sets public key. -func (u *UpdatePeerPrm) SetKey(key []byte) { - u.key = key -} - -// SetOnline marks node to be switched into "online" state. -// -// Zero UpdatePeerPrm marks node as "offline". -func (u *UpdatePeerPrm) SetOnline() { - u.state = netmap.NodeStateOnline -} - -// SetMaintenance marks node to be switched into "maintenance" state. -// -// Zero UpdatePeerPrm marks node as "offline". -func (u *UpdatePeerPrm) SetMaintenance() { - u.state = netmap.NodeStateMaintenance -} - -// UpdatePeerState changes peer status through Netmap contract call. -func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) { - method := updateStateMethod - - if c.client.WithNotary() && c.client.IsAlpha() { - // In notary environments Alphabet must call the UpdateStateIR method instead of UpdateState. - // It differs from UpdateState only by name, so we can do this in the same form. - // See https://github.com/nspcc-dev/frostfs-contract/issues/225.
- method += "IR" - } - - if p.state == 0 { - p.state = netmap.NodeStateOffline - } - - prm := client.InvokePrm{} - prm.SetMethod(method) - prm.SetArgs(int64(p.state), p.key) - prm.InvokePrmOptional = p.InvokePrmOptional - - return c.client.Invoke(ctx, prm) -} diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go deleted file mode 100644 index bc00eb889..000000000 --- a/pkg/morph/client/nns.go +++ /dev/null @@ -1,173 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "math/big" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -const ( - nnsContractID = 1 // NNS contract must be deployed first in the sidechain - - // NNSBalanceContractName is a name of the balance contract in NNS. - NNSBalanceContractName = "balance.frostfs" - // NNSContainerContractName is a name of the container contract in NNS. - NNSContainerContractName = "container.frostfs" - // NNSFrostFSIDContractName is a name of the frostfsid contract in NNS. - NNSFrostFSIDContractName = "frostfsid.frostfs" - // NNSNetmapContractName is a name of the netmap contract in NNS. - NNSNetmapContractName = "netmap.frostfs" - // NNSProxyContractName is a name of the proxy contract in NNS. - NNSProxyContractName = "proxy.frostfs" - // NNSGroupKeyName is a name for the FrostFS group key record in NNS. - NNSGroupKeyName = "group.frostfs" - // NNSPolicyContractName is a name of the policy contract in NNS. - NNSPolicyContractName = "policy.frostfs" -) - -// ErrNNSRecordNotFound means that there is no such record in NNS contract. -var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") - -// NNSAlphabetContractName returns contract name of the alphabet contract in NNS -// based on alphabet index. -func NNSAlphabetContractName(index int) string { - return "alphabet" + strconv.Itoa(index) + ".frostfs" -} - -// NNSContractAddress returns contract address script hash based on its name -// in NNS contract. -// If script hash has not been found, returns ErrNNSRecordNotFound. 
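For example, the helper above maps an alphabet index directly to an NNS domain:

	name := NNSAlphabetContractName(3) // "alphabet3.frostfs"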
-func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint160{}, ErrConnectionLost - } - - sh, err = nnsResolve(c.nnsReader, name) - if err != nil { - return sh, fmt.Errorf("NNS.resolve: %w", err) - } - return sh, nil -} - -func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { - available, err := r.IsAvailable(domain) - if err != nil { - return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) - } - - if available { - return nil, ErrNNSRecordNotFound - } - - return r.Resolve(domain, big.NewInt(int64(nns.TXT))) -} - -func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { - arr, err := nnsResolveItem(r, domain) - if err != nil { - return util.Uint160{}, err - } - - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") - } - bs, err := arr[0].TryBytes() - if err != nil { - return util.Uint160{}, fmt.Errorf("malformed response: %w", err) - } - - // We support several formats for hash encoding, this logic should be maintained in sync - // with parseNNSResolveResult from cmd/frostfs-adm/internal/modules/morph/initialize_nns.go - h, err := util.Uint160DecodeStringLE(string(bs)) - if err == nil { - return h, nil - } - - h, err = address.StringToUint160(string(bs)) - if err == nil { - return h, nil - } - - return util.Uint160{}, errors.New("no valid hashes are found") -} - -// SetGroupSignerScope makes the default signer scope include all FrostFS contracts. -// Should be called for side-chain client only. -func (c *Client) SetGroupSignerScope() error { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return ErrConnectionLost - } - - pub, err := c.contractGroupKey() - if err != nil { - return err - } - - // Don't change c before everything is OK. - cfg := c.cfg - cfg.signer = &transaction.Signer{ - Scopes: transaction.CustomGroups | transaction.CalledByEntry, - AllowedGroups: []*keys.PublicKey{pub}, - } - rpcActor, err := newActor(c.client, c.acc, cfg) - if err != nil { - return err - } - c.cfg = cfg - c.setActor(rpcActor) - return nil -} - -// contractGroupKey returns public key designating FrostFS contract group. 
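The two hash encodings accepted by nnsResolve above can be sketched with a round-trip (the zero Uint160 value is used purely for illustration; both encodings decode back to the same script hash):

	var h util.Uint160

	// Encoding 1: little-endian hex string.
	fromLE, _ := util.Uint160DecodeStringLE(h.StringLE())

	// Encoding 2: Neo address string.
	fromAddr, _ := address.StringToUint160(address.Uint160ToString(h))

	fmt.Println(fromLE.Equals(h), fromAddr.Equals(h)) // true true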
-func (c *Client) contractGroupKey() (*keys.PublicKey, error) { - success := false - startedAt := time.Now() - defer func() { - c.cache.metrics.AddMethodDuration("GroupKey", success, time.Since(startedAt)) - }() - - if gKey := c.cache.groupKey(); gKey != nil { - success = true - return gKey, nil - } - - arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) - if err != nil { - return nil, err - } - - if len(arr) == 0 { - return nil, errors.New("NNS record is missing") - } - - bs, err := arr[0].TryBytes() - if err != nil { - return nil, err - } - - pub, err := keys.NewPublicKeyFromString(string(bs)) - if err != nil { - return nil, err - } - - c.cache.setGroupKey(pub) - - success = true - return pub, nil -} diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go deleted file mode 100644 index 448702613..000000000 --- a/pkg/morph/client/notary.go +++ /dev/null @@ -1,806 +0,0 @@ -package client - -import ( - "context" - "crypto/elliptic" - "encoding/binary" - "errors" - "fmt" - "math" - "math/big" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/neorpc" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary" - sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "go.uber.org/zap" -) - -type ( - notaryInfo struct { - txValidTime uint32 // minimum number of blocks during which mainTx will be valid - roundTime uint32 // extra number of blocks to synchronize sidechain height diff of inner ring nodes - - alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - - proxy util.Uint160 - } - - notaryCfg struct { - proxy util.Uint160 - - txValidTime, roundTime uint32 - - alphabetSource AlphabetKeys - } - - AlphabetKeys func() (keys.PublicKeys, error) - NotaryOption func(*notaryCfg) -) - -const ( - defaultNotaryValidTime = 50 - defaultNotaryRoundTime = 100 - - setDesignateMethod = "designateAsRole" - - notaryNotEnabledPanicMsg = "notary support was not enabled on this client" -) - -func defaultNotaryConfig(c *Client) *notaryCfg { - return &notaryCfg{ - txValidTime: defaultNotaryValidTime, - roundTime: defaultNotaryRoundTime, - alphabetSource: c.Committee, - } -} - -// EnableNotarySupport creates a notary structure in the client that allows it -// to get alphabet keys from the committee or a provided source and to use the -// proxy contract script hash to create transactions for the notary contract.
-func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return ErrConnectionLost - } - - cfg := defaultNotaryConfig(c) - - for _, opt := range opts { - opt(cfg) - } - - if cfg.proxy.Equals(util.Uint160{}) { - var err error - - cfg.proxy, err = c.NNSContractAddress(NNSProxyContractName) - if err != nil { - return fmt.Errorf("get proxy contract address from NNS: %w", err) - } - } - - notaryCfg := &notaryInfo{ - proxy: cfg.proxy, - txValidTime: cfg.txValidTime, - roundTime: cfg.roundTime, - alphabetSource: cfg.alphabetSource, - } - - c.notary = notaryCfg - - return nil -} - -// IsNotaryEnabled returns true if EnableNotarySupport has been successfully -// called before. -func (c *Client) IsNotaryEnabled() bool { - return c.notary != nil -} - -// ProbeNotary checks if native `Notary` contract is present on chain. -func (c *Client) ProbeNotary() (res bool) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return false - } - - _, err := c.client.GetContractStateByAddressOrName(nativenames.Notary) - return err == nil -} - -// DepositNotary calls notary deposit method. Deposit is required to operate -// with the notary contract. It is used by the notary contract to produce a -// fallback tx if the main tx fails to be created. The deposit doesn't last -// forever, so this method should be called periodically. Notary support should -// be enabled in the client to use this function. - -// -// This function must be invoked with notary enabled, otherwise it panics. -func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint256{}, ErrConnectionLost - } - - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - bc, err := c.rpcActor.GetBlockCount() - if err != nil { - return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err) - } - - r := notary.NewReader(c.rpcActor) - currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash()) - if err != nil { - return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err) - } - - till := max(int64(bc+delta), int64(currentTill)) - res, _, err := c.depositNotary(ctx, amount, till) - return res, err -} - -// DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`, -// this method sets the notary deposit till parameter to the maximum possible value. -// This allows avoiding ValidAfterDeposit failures. -// -// This function must be invoked with notary enabled, otherwise it panics. -func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint256{}, 0, ErrConnectionLost - } - - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - // till value refers to a block height and it is uint32 value in neo-go - return c.depositNotary(ctx, amount, math.MaxUint32) -} - -func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { - txHash, vub, err := c.gasToken.Transfer( - c.accAddr, - notary.Hash, - big.NewInt(int64(amount)), - []any{c.acc.PrivateKey().GetScriptHash(), till}) - if err != nil { - if !errors.Is(err, neorpc.ErrAlreadyExists) { - return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err) - } - - // Transaction is already in mempool waiting to be processed.
- // This is an expected situation if we restart the service. - c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade, - zap.Int64("amount", int64(amount)), - zap.Int64("expire_at", till), - zap.Uint32("vub", vub), - zap.Error(err)) - return util.Uint256{}, 0, nil - } - - c.logger.Info(ctx, logs.ClientNotaryDepositInvoke, - zap.Int64("amount", int64(amount)), - zap.Int64("expire_at", till), - zap.Uint32("vub", vub), - zap.Stringer("tx_hash", txHash.Reverse())) - - return txHash, vub, nil -} - -// GetNotaryDeposit returns deposit of client's account in notary contract. -// Notary support should be enabled in client to use this function. -// -// This function must be invoked with notary enabled, otherwise it panics. -func (c *Client) GetNotaryDeposit() (res int64, err error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return 0, ErrConnectionLost - } - - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - sh := c.acc.PrivateKey().PublicKey().GetScriptHash() - - r := notary.NewReader(c.rpcActor) - bigIntDeposit, err := r.BalanceOf(sh) - if err != nil { - return 0, fmt.Errorf("get notary deposit: %w", err) - } - - return bigIntDeposit.Int64(), nil -} - -// UpdateNotaryListPrm groups parameters of UpdateNotaryList operation. -type UpdateNotaryListPrm struct { - list keys.PublicKeys - hash util.Uint256 -} - -// SetList sets a list of the new notary role keys. -func (u *UpdateNotaryListPrm) SetList(list keys.PublicKeys) { - u.list = list -} - -// SetHash sets hash of the transaction that led to the update -// of the notary role in the designate contract. -func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) { - u.hash = hash -} - -// UpdateNotaryList updates list of notary nodes in designate contract. Requires -// committee multi signature. -// -// This function must be invoked with notary enabled, otherwise it panics. -func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return ErrConnectionLost - } - - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) - if err != nil { - return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) - } - - return c.notaryInvokeAsCommittee( - ctx, - setDesignateMethod, - nonce, - vub, - noderoles.P2PNotary, - prm.list, - ) -} - -// UpdateAlphabetListPrm groups parameters of UpdateNeoFSAlphabetList operation. -type UpdateAlphabetListPrm struct { - list keys.PublicKeys - hash util.Uint256 -} - -// SetList sets a list of the new alphabet role keys. -func (u *UpdateAlphabetListPrm) SetList(list keys.PublicKeys) { - u.list = list -} - -// SetHash sets hash of the transaction that led to the update -// of the alphabet role in the designate contract. -func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) { - u.hash = hash -} - -// UpdateNeoFSAlphabetList updates list of alphabet nodes in designate contract. -// For the sidechain, the list should contain all inner ring nodes. -// Requires committee multi signature. -// -// This function must be invoked with notary enabled, otherwise it panics.
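To illustrate the till arithmetic in DepositNotary above with assumed numbers: at block height 5000 with delta = 100, the candidate expiration is 5100; if the current deposit already expires at block 5500, then till = max(5100, 5500) = 5500, so an existing longer-lived deposit is never shortened.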
-func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return ErrConnectionLost - } - - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) - if err != nil { - return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) - } - - return c.notaryInvokeAsCommittee( - ctx, - setDesignateMethod, - nonce, - vub, - noderoles.NeoFSAlphabet, - prm.list, - ) -} - -// NotaryInvoke invokes contract method by sending tx to notary contract in -// blockchain. Fallback tx is a `RET`. If Notary support is not enabled, -// it falls back to a simple `Invoke()`. -// -// Returns valid until block value. -// -// `nonce` and `vub` are used only if notary is enabled. -func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return InvokeRes{}, ErrConnectionLost - } - - if c.notary == nil { - return c.Invoke(ctx, contract, fee, method, args...) - } - - return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...) -} - -// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's -// private key in Invocation script. It means that main TX of notary request is -// not expected to be signed by the current node. -// -// Considered to be used by non-IR nodes. -func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return InvokeRes{}, ErrConnectionLost - } - - if c.notary == nil { - return c.Invoke(ctx, contract, fee, method, args...) - } - - return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...) -} - -// NotarySignAndInvokeTX signs and sends notary request that was received from -// Notary service. -// NOTE: does not fall back to simple `Invoke()`. Expected to be used only for -// TXs retrieved from the received notary requests. -func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return ErrConnectionLost - } - - alphabetList, err := c.notary.alphabetSource() - if err != nil { - return fmt.Errorf("fetch current alphabet keys: %w", err) - } - - cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList) - if err != nil { - return err - } - - nAct, err := notary.NewActor(c.client, cosigners, c.acc) - if err != nil { - return err - } - - // Sign exactly the same transaction we've got from the received Notary request.
- err = nAct.Sign(mainTx) - if err != nil { - return fmt.Errorf("failed to sign notary request: %w", err) - } - - mainH, fbH, untilActual, err := nAct.Notarize(mainTx, nil) - - if err != nil && !alreadyOnChainError(err) { - return err - } - - c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked, - zap.String("tx_hash", mainH.StringLE()), - zap.Uint32("valid_until_block", untilActual), - zap.String("fallback_hash", fbH.StringLE())) - - return nil -} - -func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error { - designate := c.GetDesignateHash() - _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...) - return err -} - -func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { - start := time.Now() - success := false - defer func() { - c.metrics.ObserveInvoke("notaryInvoke", contract.String(), method, success, time.Since(start)) - }() - - alphabetList, err := c.notary.alphabetSource() - if err != nil { - return InvokeRes{}, err - } - - until, err := c.getUntilValue(vub) - if err != nil { - return InvokeRes{}, err - } - - cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee) - if err != nil { - return InvokeRes{}, err - } - - nAct, err := notary.NewActor(c.client, cosigners, c.acc) - if err != nil { - return InvokeRes{}, err - } - - mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { - if r.State != vmstate.Halt.String() { - return &notHaltStateError{state: r.State, exception: r.FaultException} - } - - t.ValidUntilBlock = until - t.Nonce = nonce - - return nil - }, args...)) - - if err != nil && !alreadyOnChainError(err) { - return InvokeRes{}, err - } - - c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked, - zap.String("method", method), - zap.Uint32("valid_until_block", untilActual), - zap.String("tx_hash", mainH.StringLE()), - zap.String("fallback_hash", fbH.StringLE())) - - success = true - return InvokeRes{Hash: mainH, VUB: until}, nil -} - -func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) { - multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, false, true) - if err != nil { - return nil, err - } - - // Here we need to add a committee signature (second witness) to the pre-validated - // main transaction without creating a new one. However, Notary actor demands the - // proper set of signers for constructor, thus, fill it from the main transaction's signers list. - s := make([]actor.SignerAccount, 2, 3) - s[0] = actor.SignerAccount{ - // Proxy contract that will pay for the execution. - Signer: mainTx.Signers[0], - Account: notary.FakeContractAccount(mainTx.Signers[0].Account), - } - s[1] = actor.SignerAccount{ - // Inner ring multisignature. - Signer: mainTx.Signers[1], - Account: multiaddrAccount, - } - if len(mainTx.Signers) > 3 { - // Invoker signature (simple signature account of storage node is expected).
- var acc *wallet.Account - script := mainTx.Scripts[2].VerificationScript - if len(script) == 0 { - acc = notary.FakeContractAccount(mainTx.Signers[2].Account) - } else { - pubBytes, ok := vm.ParseSignatureContract(script) - if ok { - pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err) - } - acc = notary.FakeSimpleAccount(pub) - } else { - m, pubsBytes, ok := vm.ParseMultiSigContract(script) - if !ok { - return nil, errors.New("parse verification script of signer #2: unknown witness type") - } - pubs := make(keys.PublicKeys, len(pubsBytes)) - for i := range pubs { - pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err) - } - } - acc, err = notary.FakeMultisigAccount(m, pubs) - if err != nil { - return nil, fmt.Errorf("create fake account for signer #2: %w", err) - } - } - } - s = append(s, actor.SignerAccount{ - Signer: mainTx.Signers[2], - Account: acc, - }) - } - - return s, nil -} - -func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]actor.SignerAccount, error) { - multiaddrAccount, err := c.notaryMultisigAccount(ir, committee, invokedByAlpha) - if err != nil { - return nil, err - } - s := make([]actor.SignerAccount, 2, 3) - // Proxy contract that will pay for the execution. - s[0] = actor.SignerAccount{ - Signer: transaction.Signer{ - Account: c.notary.proxy, - // Do not change this: - // We must be able to call NNS contract indirectly from the Container contract. - // Thus, CalledByEntry is not sufficient. - // In future we may restrict this to all the usecases we have. - Scopes: transaction.Global, - }, - Account: notary.FakeContractAccount(c.notary.proxy), - } - // Inner ring multisignature. - s[1] = actor.SignerAccount{ - Signer: transaction.Signer{ - Account: multiaddrAccount.ScriptHash(), - Scopes: c.cfg.signer.Scopes, - AllowedContracts: c.cfg.signer.AllowedContracts, - AllowedGroups: c.cfg.signer.AllowedGroups, - }, - Account: multiaddrAccount, - } - - if !invokedByAlpha { - // Invoker signature. - s = append(s, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: hash.Hash160(c.acc.GetVerificationScript()), - Scopes: c.cfg.signer.Scopes, - AllowedContracts: c.cfg.signer.AllowedContracts, - AllowedGroups: c.cfg.signer.AllowedGroups, - }, - Account: c.acc, - }) - } - - // The last one is Notary contract that will be added to the signers list - // by Notary actor automatically. 
- return s, nil -} - -func (c *Client) getUntilValue(vub *uint32) (uint32, error) { - if vub != nil { - return *vub, nil - } - return c.notaryTxValidationLimit() -} - -func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedByAlpha bool) (*wallet.Account, error) { - m := sigCount(ir, committee) - - var multisigAccount *wallet.Account - var err error - if invokedByAlpha { - multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) - err := multisigAccount.ConvertMultisig(m, ir) - if err != nil { - return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) - } - } else { - // alphabet multisig redeem script is - // used as verification script for - // inner ring multiaddress witness - multisigAccount, err = notary.FakeMultisigAccount(m, ir) - if err != nil { - return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) - } - } - - return multisigAccount, nil -} - -func (c *Client) notaryTxValidationLimit() (uint32, error) { - bc, err := c.rpcActor.GetBlockCount() - if err != nil { - return 0, fmt.Errorf("get current blockchain height: %w", err) - } - - minTime := bc + c.notary.txValidTime - rounded := (minTime/c.notary.roundTime + 1) * c.notary.roundTime - - return rounded, nil -} - -// sigCount returns the number of required signatures. -// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT). -// If committee is true, returns M as N/2+1. -func sigCount(ir []*keys.PublicKey, committee bool) int { - if committee { - return sc.GetMajorityHonestNodeCount(len(ir)) - } - return sc.GetDefaultHonestNodeCount(len(ir)) -} - -// WithTxValidTime returns a notary support option for client -// that specifies the minimum number of blocks during which mainTx will be valid. -func WithTxValidTime(t uint32) NotaryOption { - return func(c *notaryCfg) { - c.txValidTime = t - } -} - -// WithRoundTime returns a notary support option for client -// that specifies extra blocks to synchronize side chain -// height diff of inner ring nodes. -func WithRoundTime(t uint32) NotaryOption { - return func(c *notaryCfg) { - c.roundTime = t - } -} - -// WithAlphabetSource returns a notary support option for client -// that specifies function to return list of alphabet node keys. -// By default notary subsystem uses committee as a source. This is -// valid for side chain but notary in main chain should override it. -func WithAlphabetSource(t AlphabetKeys) NotaryOption { - return func(c *notaryCfg) { - c.alphabetSource = t - } -} - -// WithProxyContract sets proxy contract hash. -func WithProxyContract(h util.Uint160) NotaryOption { - return func(c *notaryCfg) { - c.proxy = h - } -} - -// Neo RPC node can return `neorpc.ErrInvalidAttribute` error with -// `conflicting transaction <> is already on chain` message. This -// error is expected and ignored. As soon as the main tx is persisted on -// chain, everything is fine. This happens because notary contract -// requires 5 out of 7 signatures to send main tx, thus last two -// notary requests may be processed after main tx appeared on chain. -func alreadyOnChainError(err error) bool { - if !errors.Is(err, neorpc.ErrInvalidAttribute) { - return false - } - - const alreadyOnChainErrorMessage = "already on chain" - - return strings.Contains(err.Error(), alreadyOnChainErrorMessage) -} - -// CalculateNotaryDepositAmount calculates notary deposit amount -// using the rule: -// -// IF notaryBalance < gasBalance * gasMul { -// DEPOSIT gasBalance / gasDiv -// } ELSE { -// DEPOSIT 1 -// } -// -// gasMul and gasDiv must be positive.
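Plugging assumed numbers into the rule above: with gasMul = 1 and gasDiv = 10, a GAS balance of 40 units and a notary balance of 10 units give 40*1 > 10, so 40/10 = 4 units are deposited; once the notary balance reaches or exceeds the scaled GAS balance, only the minimal amount 1 is deposited to refresh the expiration.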
-func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) { - notaryBalance, err := c.GetNotaryDeposit() - if err != nil { - return 0, fmt.Errorf("get notary balance: %w", err) - } - - gasBalance, err := c.GasBalance() - if err != nil { - return 0, fmt.Errorf("get GAS balance: %w", err) - } - - if gasBalance == 0 { - return 0, errors.New("zero gas balance, nothing to deposit") - } - - var depositAmount int64 - - if gasBalance*gasMul > notaryBalance { - depositAmount = gasBalance / gasDiv - } else { - depositAmount = 1 - } - - return fixedn.Fixed8(depositAmount), nil -} - -// CalculateNonceAndVUB calculates nonce and ValidUntilBlock values -// based on transaction hash. -func (c *Client) CalculateNonceAndVUB(hash *util.Uint256) (nonce uint32, vub uint32, err error) { - return c.calculateNonceAndVUB(hash, false) -} - -// CalculateNonceAndVUBControl calculates nonce and rounded ValidUntilBlock values -// based on transaction hash for use in control transactions. -func (c *Client) CalculateNonceAndVUBControl(hash *util.Uint256) (nonce uint32, vub uint32, err error) { - return c.calculateNonceAndVUB(hash, true) -} - -// If hash is specified, the transaction's height and hash are used to compute VUB and nonce. -// If not, the current block height is used to compute VUB and nonce. -func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) (nonce uint32, vub uint32, err error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return 0, 0, ErrConnectionLost - } - - if c.notary == nil { - return 0, 0, nil - } - - var height uint32 - - if hash != nil { - height, err = c.getTransactionHeight(*hash) - if err != nil { - return 0, 0, fmt.Errorf("get transaction height: %w", err) - } - } else { - height, err = c.rpcActor.GetBlockCount() - if err != nil { - return 0, 0, fmt.Errorf("get chain height: %w", err) - } - } - - // For control transactions, we round down the block height to control the - // probability of all nodes producing the same transaction, since it depends - // on this value. - if roundBlockHeight { - inc := c.rpcActor.GetVersion().Protocol.MaxValidUntilBlockIncrement - height = height / inc * inc - } - - if hash != nil { - return binary.LittleEndian.Uint32(hash.BytesLE()), height + c.notary.txValidTime, nil - } - return height + c.notary.txValidTime, height + c.notary.txValidTime, nil -} - -func (c *Client) getTransactionHeight(h util.Uint256) (uint32, error) { - success := false - startedAt := time.Now() - defer func() { - c.cache.metrics.AddMethodDuration("TxHeight", success, time.Since(startedAt)) - }() - - if rh, ok := c.cache.txHeights.Get(h); ok { - success = true - return rh, nil - } - height, err := c.client.GetTransactionHeight(h) - if err != nil { - return 0, err - } - c.cache.txHeights.Add(h, height) - success = true - return height, nil -} diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go deleted file mode 100644 index 35204bb36..000000000 --- a/pkg/morph/client/notifications.go +++ /dev/null @@ -1,108 +0,0 @@ -package client - -import ( - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Close closes the connection to the remote side, making this client instance -// unusable. It closes the notification channel returned from -// Client.NotificationChannel() and removes all subscriptions.
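A worked sketch of calculateNonceAndVUB above, with the default txValidTime of 50: a notification transaction found at height 12345 yields vub = 12345 + 50 = 12395, and the nonce is the little-endian uint32 prefix of the transaction hash; with rounding enabled and assuming MaxValidUntilBlockIncrement = 5760, the height is first floored to 12345/5760*5760 = 11520 (integer division), so vub = 11570 and all nodes observing the same notification derive identical values.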
-func (c *Client) Close() { - // closing should be done via the channel - // to prevent switching to another RPC node - // in the notification loop - if c.closed.CompareAndSwap(false, true) { - close(c.closeChan) - } - c.wg.Wait() -} - -// ReceiveExecutionNotifications performs subscription for notifications -// generated during contract execution. Events are sent to the specified channel. -// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) ReceiveExecutionNotifications(contract util.Uint160, ch chan<- *state.ContainedNotificationEvent) (string, error) { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return "", ErrConnectionLost - } - - return c.client.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, ch) -} - -// ReceiveBlocks performs subscription for new block events. Events are sent -// to the specified channel. -// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) ReceiveBlocks(ch chan<- *block.Block) (string, error) { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return "", ErrConnectionLost - } - - return c.client.ReceiveBlocks(nil, ch) -} - -// ReceiveNotaryRequests performs subscription for notary request payloads -// addition or removal events to this instance of client. Passed txSigner is -// used as filter: subscription is only for the notary requests that must be -// signed by txSigner. Events are sent to the specified channel. -// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) ReceiveNotaryRequests(txSigner util.Uint160, ch chan<- *result.NotaryRequestEvent) (string, error) { - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return "", ErrConnectionLost - } - - return c.client.ReceiveNotaryRequests(&neorpc.NotaryRequestFilter{Signer: &txSigner}, ch) -} - -// Unsubscribe performs unsubscription for the given subscription ID. -// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) Unsubscribe(subID string) error { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return ErrConnectionLost - } - - return c.client.Unsubscribe(subID) -} - -// UnsubscribeAll removes all active subscriptions of current client. -// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) UnsubscribeAll() error { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return ErrConnectionLost - } - - err := c.client.UnsubscribeAll() - return err -} diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go deleted file mode 100644 index c4eb120d2..000000000 --- a/pkg/morph/client/static.go +++ /dev/null @@ -1,241 +0,0 @@ -package client - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// StaticClient is a wrapper over Neo:Morph client -// that invokes single smart contract methods with fixed fee. -// -// Working static client must be created via constructor NewStatic.
-// Using the StaticClient that has been created with new(StaticClient) -// expression (or just declaring a StaticClient variable) is unsafe -// and can lead to panic. -type StaticClient struct { - staticOpts - - client *Client // neo-go client instance - - scScriptHash util.Uint160 // contract script-hash -} - -type staticOpts struct { - tryNotary bool - alpha bool // use client's key to sign notary request's main TX - - fee fixedn.Fixed8 -} - -// WithNotary returns notary status of the client. -// -// See also TryNotary. -func (s *StaticClient) WithNotary() bool { - return s.client.IsNotaryEnabled() -} - -// IsAlpha returns Alphabet status of the client. -// -// See also AsAlphabet. -func (s *StaticClient) IsAlpha() bool { - return s.alpha -} - -// StaticClientOption allows setting an optional -// parameter of StaticClient. -type StaticClientOption func(*staticOpts) - -// NewStatic creates, initializes and returns the StaticClient instance. -// -// If provided Client instance is nil, ErrNilClient is returned. - -// Specified fee is used by default. Per-operation fees can be customized via WithCustomFee option. -func NewStatic(client *Client, scriptHash util.Uint160, fee fixedn.Fixed8, opts ...StaticClientOption) (*StaticClient, error) { - if client == nil { - return nil, ErrNilClient - } - - c := &StaticClient{ - client: client, - scScriptHash: scriptHash, - } - - c.fee = fee - - for i := range opts { - opts[i](&c.staticOpts) - } - - return c, nil -} - -// Morph returns the wrapped raw morph client. -func (s StaticClient) Morph() *Client { - return s.client -} - -// InvokePrm groups parameters of the Invoke operation. -type InvokePrm struct { - TestInvokePrm - - // optional parameters - InvokePrmOptional -} - -// InvokePrmOptional groups optional parameters of the Invoke operation. -type InvokePrmOptional struct { - // hash is an optional hash of the transaction - // that generated the notification that requires - // invoking a notary request. - // It is used so that all notification receivers - // generate the same, yet unique, nonce and - // `validUntilBlock` values. - hash *util.Uint256 - // controlTX controls whether the invoke method will use a rounded - // block height value, which is useful for control transactions which - // are required to be produced by all nodes with very high probability. - // It's only used by notary transactions and it affects only the - // computation of `validUntilBlock` values. - controlTX bool - // vub is used to set custom valid until block value. - vub uint32 -} - -// SetHash sets optional hash of the transaction. -// If hash is set and notary is enabled, StaticClient -// uses it for notary nonce and `validUntilBlock` -// calculation. -func (i *InvokePrmOptional) SetHash(hash util.Uint256) { - i.hash = &hash -} - -// SetControlTX sets whether a control transaction will be used. -func (i *InvokePrmOptional) SetControlTX(b bool) { - i.controlTX = b -} - -// IsControl gets whether a control transaction will be used. -func (i *InvokePrmOptional) IsControl() bool { - return i.controlTX -} - -// SetVUB sets valid until block value. -func (i *InvokePrmOptional) SetVUB(v uint32) { - i.vub = v -} - -type InvokeRes struct { - Hash util.Uint256 - VUB uint32 -} - -// Invoke calls Invoke method of Client with static internal script hash and fee. -// Supported args types are the same as in Client. -// -// If TryNotary is provided: -// - if AsAlphabet is provided, calls NotaryInvoke; -// - otherwise, calls NotaryInvokeNotAlpha.
-// -// If fee for the operation executed using specified method is customized, then StaticClient uses it. -// Otherwise, default fee is used. -func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) { - var vubP *uint32 - if s.tryNotary { - if s.alpha { - var ( - nonce uint32 = 1 - vub uint32 - err error - ) - - if prm.hash != nil { - if prm.controlTX { - nonce, vub, err = s.client.CalculateNonceAndVUBControl(prm.hash) - } else { - nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash) - } - if err != nil { - return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err) - } - - vubP = &vub - } - - if prm.vub > 0 { - vubP = &prm.vub - } - - return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) - } - - if prm.vub > 0 { - vubP = &prm.vub - } - - return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) - } - - return s.client.Invoke( - ctx, - s.scScriptHash, - s.fee, - prm.method, - prm.args..., - ) -} - -// TestInvokePrm groups parameters of the TestInvoke operation. -type TestInvokePrm struct { - method string - args []any -} - -// SetMethod sets method of the contract to call. -func (ti *TestInvokePrm) SetMethod(method string) { - ti.method = method -} - -// SetArgs sets arguments of the contract call. -func (ti *TestInvokePrm) SetArgs(args ...any) { - ti.args = args -} - -// TestInvoke calls TestInvoke method of Client with static internal script hash. -func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) { - _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method) - defer span.End() - return s.client.TestInvoke( - s.scScriptHash, - prm.method, - prm.args..., - ) -} - -// ContractAddress returns the address of the associated contract. -func (s StaticClient) ContractAddress() util.Uint160 { - return s.scScriptHash -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() StaticClientOption { - return func(o *staticOpts) { - o.tryNotary = true - } -} - -// AsAlphabet returns option to sign main TX -// of notary requests with client's private -// key. -// -// Considered to be used by IR nodes only. -func AsAlphabet() StaticClientOption { - return func(o *staticOpts) { - o.alpha = true - } -} diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go deleted file mode 100644 index f7b6705a8..000000000 --- a/pkg/morph/client/util.go +++ /dev/null @@ -1,108 +0,0 @@ -package client - -import ( - "fmt" - "math/big" - - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -/* - Use these functions to parse stack parameters obtained from the `TestInvoke` - function into native Go types. You should know upfront the return types of the - invoked method. -*/ - -// BoolFromStackItem receives boolean value from the value of a smart contract parameter. -func BoolFromStackItem(param stackitem.Item) (bool, error) { - switch param.Type() { - case stackitem.BooleanT, stackitem.IntegerT, stackitem.ByteArrayT: - return param.TryBool() - default: - return false, fmt.Errorf("chain/client: %s is not a bool type", param.Type()) - } -} - -// IntFromStackItem receives numerical value from the value of a smart contract parameter.
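A minimal sketch of the StaticClient invocation flow above, assuming sc is a StaticClient built with NewStatic for some contract (the "newEpoch" method name and the argument are illustrative):

	prm := InvokePrm{}
	prm.SetMethod("newEpoch")
	prm.SetArgs(uint64(42))
	prm.SetVUB(12400) // optional: pin a custom valid-until-block
	res, err := sc.Invoke(ctx, prm)
	if err != nil {
		return err
	}
	fmt.Println(res.Hash.StringLE(), res.VUB)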
-
-// IntFromStackItem receives a numerical value from the value of a smart contract parameter.
-func IntFromStackItem(param stackitem.Item) (int64, error) {
-	switch param.Type() {
-	case stackitem.IntegerT, stackitem.ByteArrayT:
-		i, err := param.TryInteger()
-		if err != nil {
-			return 0, err
-		}
-
-		return i.Int64(), nil
-	default:
-		return 0, fmt.Errorf("chain/client: %s is not an integer type", param.Type())
-	}
-}
-
-// BigIntFromStackItem receives a numerical value from the value of a smart contract parameter.
-func BigIntFromStackItem(param stackitem.Item) (*big.Int, error) {
-	return param.TryInteger()
-}
-
-// BytesFromStackItem receives a binary value from the value of a smart contract parameter.
-func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
-	switch param.Type() {
-	case stackitem.BufferT, stackitem.ByteArrayT:
-		return param.TryBytes()
-	case stackitem.IntegerT:
-		n, err := param.TryInteger()
-		if err != nil {
-			return nil, fmt.Errorf("parse integer bytes: %w", err)
-		}
-
-		return n.Bytes(), nil
-	case stackitem.AnyT:
-		if param.Value() == nil {
-			return nil, nil
-		}
-		fallthrough
-	default:
-		return nil, fmt.Errorf("chain/client: %s is not a byte array type", param.Type())
-	}
-}
-
-// ArrayFromStackItem returns the slice of contract parameters from the passed parameter.
-//
-// If the passed parameter is of the AnyT type (e.g. Null), returns (nil, nil).
-func ArrayFromStackItem(param stackitem.Item) ([]stackitem.Item, error) {
-	switch param.Type() {
-	case stackitem.AnyT:
-		return nil, nil
-	case stackitem.ArrayT, stackitem.StructT:
-		items, ok := param.Value().([]stackitem.Item)
-		if !ok {
-			return nil, fmt.Errorf("chain/client: can't convert %T to parameter slice", param.Value())
-		}
-
-		return items, nil
-	default:
-		return nil, fmt.Errorf("chain/client: %s is not an array type", param.Type())
-	}
-}
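-
-// exampleParseRecords is an illustrative sketch added by the editor and is
-// not part of the original file. It shows how ArrayFromStackItem is combined
-// with the scalar helpers to walk a nested result; the [name, amount] record
-// layout is a hypothetical assumption about the invoked method.
-func exampleParseRecords(root stackitem.Item) (map[string]int64, error) {
-	rows, err := ArrayFromStackItem(root)
-	if err != nil {
-		return nil, err
-	}
-
-	out := make(map[string]int64, len(rows))
-
-	for i := range rows {
-		fields, err := ArrayFromStackItem(rows[i])
-		if err != nil {
-			return nil, err
-		}
-		if len(fields) != 2 {
-			return nil, fmt.Errorf("unexpected field count: %d", len(fields))
-		}
-
-		name, err := StringFromStackItem(fields[0])
-		if err != nil {
-			return nil, err
-		}
-
-		amount, err := IntFromStackItem(fields[1])
-		if err != nil {
-			return nil, err
-		}
-
-		out[name] = amount
-	}
-
-	return out, nil
-}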
-
-// StringFromStackItem receives a string value from the value of a smart contract parameter.
-func StringFromStackItem(param stackitem.Item) (string, error) {
-	if param.Type() != stackitem.ByteArrayT {
-		return "", fmt.Errorf("chain/client: %s is not a string type", param.Type())
-	}
-
-	return stackitem.ToString(param)
-}
-
-func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
-	return func(r *result.Invoke, t *transaction.Transaction) error {
-		if r.State != HaltState {
-			return &notHaltStateError{state: r.State, exception: r.FaultException}
-		}
-
-		t.SystemFee += add
-
-		return nil
-	}
-}
diff --git a/pkg/morph/client/util_test.go b/pkg/morph/client/util_test.go
deleted file mode 100644
index 897a02333..000000000
--- a/pkg/morph/client/util_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package client
-
-import (
-	"math"
-	"math/big"
-	"testing"
-
-	"github.com/nspcc-dev/neo-go/pkg/encoding/bigint"
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-	"github.com/stretchr/testify/require"
-)
-
-var (
-	bigIntValue = new(big.Int).Mul(big.NewInt(math.MaxInt64), big.NewInt(10))
-
-	stringByteItem     = stackitem.NewByteArray([]byte("Hello World"))
-	intItem            = stackitem.NewBigInteger(new(big.Int).SetInt64(1))
-	bigIntItem         = stackitem.NewBigInteger(bigIntValue)
-	byteWithIntItem    = stackitem.NewByteArray([]byte{0x0a})
-	byteWithBigIntItem = stackitem.NewByteArray(bigint.ToBytes(bigIntValue))
-	emptyByteArrayItem = stackitem.NewByteArray([]byte{})
-	trueBoolItem       = stackitem.NewBool(true)
-	falseBoolItem      = stackitem.NewBool(false)
-	arrayItem          = stackitem.NewArray([]stackitem.Item{intItem, stringByteItem})
-	anyTypeItem        = stackitem.Null{}
-)
-
-func TestBoolFromStackItem(t *testing.T) {
-	t.Run("true assert", func(t *testing.T) {
-		val, err := BoolFromStackItem(trueBoolItem)
-		require.NoError(t, err)
-		require.True(t, val)
-
-		val, err = BoolFromStackItem(intItem)
-		require.NoError(t, err)
-		require.True(t, val)
-	})
-
-	t.Run("false assert", func(t *testing.T) {
-		val, err := BoolFromStackItem(falseBoolItem)
-		require.NoError(t, err)
-		require.False(t, val)
-
-		val, err = BoolFromStackItem(emptyByteArrayItem)
-		require.NoError(t, err)
-		require.False(t, val)
-	})
-
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := BoolFromStackItem(arrayItem)
-		require.Error(t, err)
-	})
-}
-
-func TestArrayFromStackItem(t *testing.T) {
-	t.Run("correct assert", func(t *testing.T) {
-		val, err := ArrayFromStackItem(arrayItem)
-		require.NoError(t, err)
-		require.Len(t, val, len(arrayItem.Value().([]stackitem.Item)))
-	})
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := ArrayFromStackItem(stringByteItem)
-		require.Error(t, err)
-	})
-	t.Run("nil array case", func(t *testing.T) {
-		val, err := ArrayFromStackItem(anyTypeItem)
-		require.NoError(t, err)
-		require.Nil(t, val)
-	})
-}
-
-func TestBytesFromStackItem(t *testing.T) {
-	t.Run("correct assert", func(t *testing.T) {
-		val, err := BytesFromStackItem(stringByteItem)
-		require.NoError(t, err)
-		require.Equal(t, stringByteItem.Value().([]byte), val)
-
-		val, err = BytesFromStackItem(intItem)
-		require.NoError(t, err)
-		require.Equal(t, intItem.Value().(*big.Int).Bytes(), val)
-	})
-
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := BytesFromStackItem(arrayItem)
-		require.Error(t, err)
-	})
-}
-
-func TestIntFromStackItem(t *testing.T) {
-	t.Run("correct assert", func(t *testing.T) {
-		val, err := IntFromStackItem(intItem)
-		require.NoError(t, err)
-		require.Equal(t, intItem.Value().(*big.Int).Int64(), val)
-
-		val, err = IntFromStackItem(byteWithIntItem)
-		require.NoError(t, err)
-		require.Equal(t, int64(0x0a), val)
-
-		val, err = IntFromStackItem(emptyByteArrayItem)
-		require.NoError(t, err)
-		require.Equal(t, int64(0), val)
-	})
-
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := IntFromStackItem(arrayItem)
-		require.Error(t, err)
-	})
-}
-
-func TestBigIntFromStackItem(t *testing.T) {
-	t.Run("correct assert", func(t *testing.T) {
-		val, err := BigIntFromStackItem(bigIntItem)
-		require.NoError(t, err)
-		require.Equal(t, bigIntValue, val)
-
-		val, err = BigIntFromStackItem(byteWithBigIntItem)
-		require.NoError(t, err)
-		require.Equal(t, bigIntValue, val)
-
-		val, err = BigIntFromStackItem(emptyByteArrayItem)
-		require.NoError(t, err)
-		require.Equal(t, big.NewInt(0), val)
-	})
-
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := BigIntFromStackItem(arrayItem)
-		require.Error(t, err)
-	})
-}
-
-func TestStringFromStackItem(t *testing.T) {
-	t.Run("correct assert", func(t *testing.T) {
-		val, err := StringFromStackItem(stringByteItem)
-		require.NoError(t, err)
-		require.Equal(t, string(stringByteItem.Value().([]byte)), val)
-	})
-
-	t.Run("incorrect assert", func(t *testing.T) {
-		_, err := StringFromStackItem(intItem)
-		require.Error(t, err)
-	})
-}
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
deleted file mode 100644
index 87fcf84b8..000000000
--- a/pkg/morph/client/waiter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-)
-
-type waiterClient struct {
-	c *Client
-}
-
-func (w *waiterClient) Context() context.Context {
-	return context.Background()
-}
-
-func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
-	return w.c.GetApplicationLog(hash, trig)
-}
-
-func (w *waiterClient) GetBlockCount() (uint32, error) {
-	return w.c.BlockCount()
-}
-
-func (w *waiterClient) GetVersion() (*result.Version, error) {
-	return w.c.GetVersion()
-}
-
-// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
-// It also checks execution result to finish in HALT state.
-func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
-	w, err := waiter.NewPollingBased(&waiterClient{c: c})
-	if err != nil {
-		return fmt.Errorf("create tx waiter: %w", err)
-	}
-
-	res, err := w.WaitAny(ctx, vub, h)
-	if err != nil {
-		return fmt.Errorf("wait until tx persists: %w", err)
-	}
-
-	if res.VMState.HasFlag(vmstate.Halt) {
-		return nil
-	}
-	return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
-}
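-
-// exampleInvokeAndWait is an illustrative sketch added by the editor and is
-// not part of the original file. It shows how the VUB value returned in
-// InvokeRes feeds WaitTxHalt to block until the transaction is persisted
-// and has executed in the HALT state.
-func exampleInvokeAndWait(ctx context.Context, s StaticClient, prm InvokePrm) error {
-	res, err := s.Invoke(ctx, prm)
-	if err != nil {
-		return err
-	}
-
-	// Morph exposes the underlying raw client that implements WaitTxHalt.
-	return s.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
-}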
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
deleted file mode 100644
index 99f80584a..000000000
--- a/pkg/morph/event/balance/lock.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package balance
-
-import (
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Lock structure of balance.Lock notification from morph chain.
-type Lock struct {
-	id     []byte
-	user   util.Uint160
-	lock   util.Uint160
-	amount int64 // Fixed16
-	until  int64
-
-	// txHash is used in the notary environment for calculating
-	// values that are unique, but the same for all notification
-	// receivers.
-	txHash util.Uint256
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Lock) MorphEvent() {}
-
-// ID is a withdraw transaction hash.
-func (l Lock) ID() []byte { return l.id }
-
-// User returns the withdraw receiver script hash from the mainnet.
-func (l Lock) User() util.Uint160 { return l.user }
-
-// LockAccount returns the script hash of the balance contract wallet.
-func (l Lock) LockAccount() util.Uint160 { return l.lock }
-
-// Amount of the locked assets.
-func (l Lock) Amount() int64 { return l.amount }
-
-// Until is the epoch up to which the locked account exists.
-func (l Lock) Until() int64 { return l.until }
-
-// TxHash returns the hash of the TX with the lock
-// notification.
-func (l Lock) TxHash() util.Uint256 { return l.txHash }
-
-// ParseLock parses a notification into a Lock structure.
-func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
-	var le balance.LockEvent
-	if err := le.FromStackItem(e.Item); err != nil {
-		return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
-	}
-
-	return Lock{
-		id:     le.TxID,
-		user:   le.From,
-		lock:   le.To,
-		amount: le.Amount.Int64(),
-		until:  le.Until.Int64(),
-		txHash: e.Container,
-	}, nil
-}
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
deleted file mode 100644
index 87b91aede..000000000
--- a/pkg/morph/event/balance/lock_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package balance
-
-import (
-	"math/big"
-	"testing"
-
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-	"github.com/stretchr/testify/require"
-)
-
-func TestParseLock(t *testing.T) {
-	var (
-		id   = []byte("Hello World")
-		user = util.Uint160{0x1, 0x2, 0x3}
-		lock = util.Uint160{0x3, 0x2, 0x1}
-
-		amount int64 = 10
-		until  int64 = 20
-	)
-
-	t.Run("wrong number of parameters", func(t *testing.T) {
-		prms := []stackitem.Item{
-			stackitem.NewMap(),
-			stackitem.NewMap(),
-		}
-
-		_, err := ParseLock(createNotifyEventFromItems(prms))
-		require.Error(t, err)
-	})
-
-	t.Run("wrong id parameter", func(t *testing.T) {
-		_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong from parameter", func(t *testing.T) {
-		_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong lock parameter", func(t *testing.T) {
-		_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewByteArray(user.BytesBE()),
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong amount parameter", func(t *testing.T) {
-		_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewByteArray(user.BytesBE()),
-			stackitem.NewByteArray(lock.BytesBE()),
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong until parameter", func(t *testing.T) {
-		_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewByteArray(user.BytesBE()),
-			stackitem.NewByteArray(lock.BytesBE()),
-			stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
-			stackitem.NewMap(),
-		}))
-
-		
require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewByteArray(lock.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewBigInteger(new(big.Int).SetInt64(until)), - })) - - require.NoError(t, err) - require.Equal(t, Lock{ - id: id, - user: user, - lock: lock, - amount: amount, - until: until, - }, ev) - }) -} - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go deleted file mode 100644 index d28f6d521..000000000 --- a/pkg/morph/event/container/delete.go +++ /dev/null @@ -1,71 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// Delete structure of container.Delete notification from morph chain. -type Delete struct { - ContainerIDValue []byte - SignatureValue []byte - TokenValue []byte - PublicKeyValue []byte - - // For notary notifications only. - // Contains raw transactions of notary request. - NotaryRequestValue *payload.P2PNotaryRequest -} - -// MorphEvent implements Neo:Morph Event interface. -func (Delete) MorphEvent() {} - -// ContainerID is a marshalled container structure, defined in API. -func (d Delete) ContainerID() []byte { return d.ContainerIDValue } - -// Signature of marshalled container by container owner. -func (d Delete) Signature() []byte { return d.SignatureValue } - -// SessionToken returns binary token of the session -// within which the eACL was set. -func (d Delete) SessionToken() []byte { - return d.TokenValue -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. -func (d Delete) NotaryRequest() *payload.P2PNotaryRequest { - return d.NotaryRequestValue -} - -const expectedItemNumDelete = 4 - -// DeleteSuccess structures notification event of successful container removal -// thrown by Container contract. -type DeleteSuccess struct { - // Identifier of the removed container. - ID cid.ID -} - -// MorphEvent implements Neo:Morph Event interface. -func (DeleteSuccess) MorphEvent() {} - -// ParseDeleteSuccess decodes notification event thrown by Container contract into -// DeleteSuccess and returns it as event.Event. 
-func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - var dse container.DeleteSuccessEvent - if err := dse.FromStackItem(e.Item); err != nil { - return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err) - } - - var cnr cid.ID - cnr.SetSHA256(dse.ContainerID) - return DeleteSuccess{ - ID: cnr, - }, nil -} diff --git a/pkg/morph/event/container/delete_notary.go b/pkg/morph/event/container/delete_notary.go deleted file mode 100644 index 9711636e7..000000000 --- a/pkg/morph/event/container/delete_notary.go +++ /dev/null @@ -1,73 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func (d *Delete) setContainerID(v []byte) { - if v != nil { - d.ContainerIDValue = v - } -} - -func (d *Delete) setSignature(v []byte) { - if v != nil { - d.SignatureValue = v - } -} - -func (d *Delete) setPublicKey(v []byte) { - d.PublicKeyValue = v -} - -func (d *Delete) setToken(v []byte) { - if v != nil { - d.TokenValue = v - } -} - -var deleteFieldSetters = []func(*Delete, []byte){ - // order on stack is reversed - (*Delete).setToken, - (*Delete).setPublicKey, - (*Delete).setSignature, - (*Delete).setContainerID, -} - -const ( - // DeleteNotaryEvent is method name for container delete operations - // in `Container` contract. Is used as identificator for notary - // delete container requests. - DeleteNotaryEvent = "delete" -) - -// ParseDeleteNotary from NotaryEvent into container event structure. -func ParseDeleteNotary(ne event.NotaryEvent) (event.Event, error) { - var ( - ev Delete - currentOp opcode.Opcode - ) - - fieldNum := 0 - - for _, op := range ne.Params() { - currentOp = op.Code() - - switch { - case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4: - if fieldNum == expectedItemNumDelete { - return nil, event.UnexpectedArgNumErr(DeleteNotaryEvent) - } - - deleteFieldSetters[fieldNum](&ev, op.Param()) - fieldNum++ - default: - return nil, event.UnexpectedOpcode(DeleteNotaryEvent, op.Code()) - } - } - - ev.NotaryRequestValue = ne.Raw() - - return ev, nil -} diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go deleted file mode 100644 index 62e7d7277..000000000 --- a/pkg/morph/event/container/delete_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package container - -import ( - "crypto/sha256" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseDeleteSuccess(t *testing.T) { - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseDeleteSuccess(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong container parameter", func(t *testing.T) { - _, err := ParseDeleteSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - - _, err = ParseDeleteSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray([]byte{1, 2, 3}), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - id := cidtest.ID() - - binID := make([]byte, sha256.Size) - id.Encode(binID) - - ev, err := ParseDeleteSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - })) - - require.NoError(t, err) - - require.Equal(t, DeleteSuccess{ - ID: id, - }, ev) - }) 
-} diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go deleted file mode 100644 index b09394ba4..000000000 --- a/pkg/morph/event/container/put.go +++ /dev/null @@ -1,91 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// Put structure of container.Put notification from morph chain. -type Put struct { - rawContainer []byte - signature []byte - publicKey []byte - token []byte - - // For notary notifications only. - // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest -} - -const expectedItemNumPut = 4 - -// MorphEvent implements Neo:Morph Event interface. -func (Put) MorphEvent() {} - -// Container is a marshalled container structure, defined in API. -func (p Put) Container() []byte { return p.rawContainer } - -// Signature of marshalled container by container owner. -func (p Put) Signature() []byte { return p.signature } - -// PublicKey of container owner. -func (p Put) PublicKey() []byte { return p.publicKey } - -// SessionToken returns binary token of the session -// within which the container was created. -func (p Put) SessionToken() []byte { - return p.token -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. -func (p Put) NotaryRequest() *payload.P2PNotaryRequest { - return p.notaryRequest -} - -// PutNamed represents notification event spawned by PutNamed method from Container contract of FrostFS Morph chain. -type PutNamed struct { - Put - - name, zone string -} - -// Name returns "name" arg of contract call. -func (x PutNamed) Name() string { - return x.name -} - -// Zone returns "zone" arg of contract call. -func (x PutNamed) Zone() string { - return x.zone -} - -// PutSuccess structures notification event of successful container creation -// thrown by Container contract. -type PutSuccess struct { - // Identifier of the newly created container. - ID cid.ID -} - -// MorphEvent implements Neo:Morph Event interface. -func (PutSuccess) MorphEvent() {} - -// ParsePutSuccess decodes notification event thrown by Container contract into -// PutSuccess and returns it as event.Event. 
-func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - var pse container.PutSuccessEvent - if err := pse.FromStackItem(e.Item); err != nil { - return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err) - } - - var cnr cid.ID - cnr.SetSHA256(pse.ContainerID) - return PutSuccess{ - ID: cnr, - }, nil -} diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go deleted file mode 100644 index 6b2ee7b0a..000000000 --- a/pkg/morph/event/container/put_notary.go +++ /dev/null @@ -1,123 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/network/payload" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func (p *Put) setRawContainer(v []byte) { - if v != nil { - p.rawContainer = v - } -} - -func (p *Put) setSignature(v []byte) { - if v != nil { - p.signature = v - } -} - -func (p *Put) setPublicKey(v []byte) { - if v != nil { - p.publicKey = v - } -} - -func (p *Put) setToken(v []byte) { - if v != nil { - p.token = v - } -} - -var putFieldSetters = []func(*Put, []byte){ - // order on stack is reversed - (*Put).setToken, - (*Put).setPublicKey, - (*Put).setSignature, - (*Put).setRawContainer, -} - -const ( - // PutNotaryEvent is method name for container put operations - // in `Container` contract. Is used as identificator for notary - // put container requests. - PutNotaryEvent = "put" - - // PutNamedNotaryEvent is an ID of notary "put named container" notification. - PutNamedNotaryEvent = "putNamed" -) - -func parsePutNotary(ev *Put, raw *payload.P2PNotaryRequest, ops []event.Op) error { - var ( - currentOp opcode.Opcode - fieldNum = 0 - ) - - for _, op := range ops { - currentOp = op.Code() - - switch { - case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4: - if fieldNum == expectedItemNumPut { - return event.UnexpectedArgNumErr(PutNotaryEvent) - } - - putFieldSetters[fieldNum](ev, op.Param()) - fieldNum++ - default: - return event.UnexpectedOpcode(PutNotaryEvent, op.Code()) - } - } - - ev.notaryRequest = raw - - return nil -} - -// ParsePutNotary from NotaryEvent into container event structure. -func ParsePutNotary(ne event.NotaryEvent) (event.Event, error) { - var ev Put - - err := parsePutNotary(&ev, ne.Raw(), ne.Params()) - if err != nil { - return nil, err - } - - return ev, nil -} - -// ParsePutNamedNotary parses PutNamed event structure from generic event.NotaryEvent. 
-func ParsePutNamedNotary(ne event.NotaryEvent) (event.Event, error) { - ops := ne.Params() - - const putNamedAdditionalArgs = 2 // PutNamed has same args as Put + (name, zone) (2) - - if len(ops) != expectedItemNumPut+putNamedAdditionalArgs { - return nil, event.UnexpectedArgNumErr(PutNamedNotaryEvent) - } - - var ( - ev PutNamed - err error - ) - - ev.zone, err = event.StringFromOpcode(ops[0]) - if err != nil { - return nil, fmt.Errorf("parse arg zone: %w", err) - } - - ev.name, err = event.StringFromOpcode(ops[1]) - if err != nil { - return nil, fmt.Errorf("parse arg name: %w", err) - } - - err = parsePutNotary(&ev.Put, ne.Raw(), ops[putNamedAdditionalArgs:]) - if err != nil { - return nil, err - } - - return ev, nil -} diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go deleted file mode 100644 index dd5c7ea93..000000000 --- a/pkg/morph/event/container/put_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package container - -import ( - "crypto/sha256" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParsePutSuccess(t *testing.T) { - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - } - - _, err := ParsePutSuccess(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong container ID parameter", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - id := cidtest.ID() - - binID := make([]byte, sha256.Size) - id.Encode(binID) - - t.Run("wrong public key parameter", func(t *testing.T) { - t.Run("wrong type", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - t.Run("garbage data", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewByteArray([]byte("key")), - })) - require.Error(t, err) - }) - }) - - t.Run("correct behavior", func(t *testing.T) { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewByteArray(pk.PublicKey().Bytes()), - })) - - require.NoError(t, err) - - require.Equal(t, PutSuccess{ - ID: id, - }, ev) - }) -} diff --git a/pkg/morph/event/container/util_test.go b/pkg/morph/event/container/util_test.go deleted file mode 100644 index 159f6cd9f..000000000 --- a/pkg/morph/event/container/util_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package container - -import ( - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go deleted file mode 100644 index cf56464b8..000000000 --- a/pkg/morph/event/frostfs/cheque.go +++ /dev/null @@ -1,53 +0,0 @@ -package frostfs - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Cheque structure of frostfs.Cheque notification from mainnet chain. -type Cheque struct { - IDValue []byte - AmountValue int64 // Fixed8 - UserValue util.Uint160 - LockValue util.Uint160 -} - -// MorphEvent implements Neo:Morph Event interface. -func (Cheque) MorphEvent() {} - -// ID is a withdraw transaction hash. -func (c Cheque) ID() []byte { return c.IDValue } - -// User returns withdraw receiver script hash from main net. -func (c Cheque) User() util.Uint160 { return c.UserValue } - -// Amount of the sent assets. -func (c Cheque) Amount() int64 { return c.AmountValue } - -// LockAccount return script hash for balance contract wallet. -func (c Cheque) LockAccount() util.Uint160 { return c.LockValue } - -// ParseCheque from notification into cheque structure. -func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { - var ce frostfs.ChequeEvent - if err := ce.FromStackItem(e.Item); err != nil { - return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err) - } - - lock, err := util.Uint160DecodeBytesBE(ce.LockAccount) - if err != nil { - return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err) - } - - return Cheque{ - IDValue: ce.Id, - AmountValue: ce.Amount.Int64(), - UserValue: ce.User, - LockValue: lock, - }, nil -} diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go deleted file mode 100644 index d92b7922b..000000000 --- a/pkg/morph/event/frostfs/cheque_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package frostfs - -import ( - "math/big" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseCheque(t *testing.T) { - var ( - id = []byte("Hello World") - user = util.Uint160{0x1, 0x2, 0x3} - lock = util.Uint160{0x3, 0x2, 0x1} - - amount int64 = 10 - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseCheque(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong id parameter", func(t *testing.T) { - _, err := ParseCheque(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong user parameter", func(t *testing.T) { - _, err := ParseCheque(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong amount parameter", func(t *testing.T) { - _, err := ParseCheque(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong lock parameter", func(t *testing.T) { - _, err := ParseCheque(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseCheque(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - 
stackitem.NewByteArray(lock.BytesBE()),
-		}))
-
-		require.NoError(t, err)
-		require.Equal(t, Cheque{
-			IDValue:     id,
-			AmountValue: amount,
-			UserValue:   user,
-			LockValue:   lock,
-		}, ev)
-	})
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
-	return &state.ContainedNotificationEvent{
-		NotificationEvent: state.NotificationEvent{
-			Item: stackitem.NewArray(items),
-		},
-	}
-}
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
deleted file mode 100644
index 805e80f3c..000000000
--- a/pkg/morph/event/frostfs/config.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package frostfs
-
-import (
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-type Config struct {
-	KeyValue   []byte
-	ValueValue []byte
-	IDValue    []byte
-
-	// TxHashValue is used in the notary environment for calculating
-	// values that are unique, but the same for all notification
-	// receivers.
-	TxHashValue util.Uint256
-}
-
-// TxHash returns the hash of the TX with the set config
-// notification.
-func (u Config) TxHash() util.Uint256 {
-	return u.TxHashValue
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Config) MorphEvent() {}
-
-func (u Config) ID() []byte { return u.IDValue }
-
-func (u Config) Key() []byte { return u.KeyValue }
-
-func (u Config) Value() []byte { return u.ValueValue }
-
-func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
-	var sce frostfs.SetConfigEvent
-	if err := sce.FromStackItem(e.Item); err != nil {
-		return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
-	}
-
-	return Config{
-		KeyValue:    sce.Key,
-		ValueValue:  sce.Value,
-		IDValue:     sce.Id,
-		TxHashValue: e.Container,
-	}, nil
-}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
deleted file mode 100644
index 8acc8c15c..000000000
--- a/pkg/morph/event/frostfs/config_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package frostfs
-
-import (
-	"testing"
-
-	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-	"github.com/stretchr/testify/require"
-)
-
-func TestParseConfig(t *testing.T) {
-	var (
-		id    = []byte("id")
-		key   = []byte("key")
-		value = []byte("value")
-	)
-
-	t.Run("wrong number of parameters", func(t *testing.T) {
-		prms := []stackitem.Item{
-			stackitem.NewMap(),
-		}
-
-		_, err := ParseConfig(createNotifyEventFromItems(prms))
-		require.Error(t, err)
-	})
-
-	t.Run("wrong first parameter", func(t *testing.T) {
-		_, err := ParseConfig(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong second parameter", func(t *testing.T) {
-		_, err := ParseConfig(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("wrong third parameter", func(t *testing.T) {
-		_, err := ParseConfig(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewByteArray(key),
-			stackitem.NewMap(),
-		}))
-
-		require.Error(t, err)
-	})
-
-	t.Run("correct", func(t *testing.T) {
-		ev, err := ParseConfig(createNotifyEventFromItems([]stackitem.Item{
-			stackitem.NewByteArray(id),
-			stackitem.NewByteArray(key),
-			stackitem.NewByteArray(value),
-		}))
-		require.NoError(t, err)
-
-		require.Equal(t, Config{
-			IDValue:    id,
-			KeyValue:   key,
-			ValueValue: value,
-		}, ev)
-	})
-}
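-
-// exampleHandleConfig is an illustrative sketch added by the editor and is
-// not part of the original file. It shows how a parsed event is consumed by
-// a handler: ParseConfig returns an event.Event, so callers type-assert it
-// back to Config (the event package import is assumed from context).
-func exampleHandleConfig(ev event.Event) {
-	cfg, ok := ev.(Config)
-	if !ok {
-		return
-	}
-
-	_ = cfg.Key()   // raw configuration key
-	_ = cfg.Value() // raw configuration value
-}
diff --git 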
a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go deleted file mode 100644 index fcb01577e..000000000 --- a/pkg/morph/event/frostfs/deposit.go +++ /dev/null @@ -1,48 +0,0 @@ -package frostfs - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Deposit structure of frostfs.Deposit notification from mainnet chain. -type Deposit struct { - IDValue []byte - AmountValue int64 // Fixed8 - FromValue util.Uint160 - ToValue util.Uint160 -} - -// MorphEvent implements Neo:Morph Event interface. -func (Deposit) MorphEvent() {} - -// ID is a deposit transaction hash. -func (d Deposit) ID() []byte { return d.IDValue } - -// From is a script hash of asset sender in main net. -func (d Deposit) From() util.Uint160 { return d.FromValue } - -// To is a script hash of asset receiver in balance contract. -func (d Deposit) To() util.Uint160 { return d.ToValue } - -// Amount of transferred assets. -func (d Deposit) Amount() int64 { return d.AmountValue } - -// ParseDeposit notification into deposit structure. -func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { - var de frostfs.DepositEvent - if err := de.FromStackItem(e.Item); err != nil { - return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err) - } - - return Deposit{ - IDValue: de.TxHash[:], - AmountValue: de.Amount.Int64(), - FromValue: de.From, - ToValue: de.Receiver, - }, nil -} diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go deleted file mode 100644 index 38d3e61f6..000000000 --- a/pkg/morph/event/frostfs/deposit_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package frostfs - -import ( - "math/big" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseDeposit(t *testing.T) { - var ( - id = util.Uint256{0, 1, 2, 3} - from = util.Uint160{0x1, 0x2, 0x3} - to = util.Uint160{0x3, 0x2, 0x1} - - amount int64 = 10 - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseDeposit(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong from parameter", func(t *testing.T) { - _, err := ParseDeposit(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong amount parameter", func(t *testing.T) { - _, err := ParseDeposit(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(from.BytesBE()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong to parameter", func(t *testing.T) { - _, err := ParseDeposit(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(from.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong id parameter", func(t *testing.T) { - _, err := ParseDeposit(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(from.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(to.BytesBE()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseDeposit(createNotifyEventFromItems([]stackitem.Item{ - 
stackitem.NewByteArray(from.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(to.BytesBE()), - stackitem.NewByteArray(id[:]), - })) - - require.NoError(t, err) - require.Equal(t, Deposit{ - IDValue: id[:], - AmountValue: amount, - FromValue: from, - ToValue: to, - }, ev) - }) -} diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go deleted file mode 100644 index 2568b6512..000000000 --- a/pkg/morph/event/frostfs/withdraw.go +++ /dev/null @@ -1,43 +0,0 @@ -package frostfs - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Withdraw structure of frostfs.Withdraw notification from mainnet chain. -type Withdraw struct { - IDValue []byte - AmountValue int64 // Fixed8 - UserValue util.Uint160 -} - -// MorphEvent implements Neo:Morph Event interface. -func (Withdraw) MorphEvent() {} - -// ID is a withdraw transaction hash. -func (w Withdraw) ID() []byte { return w.IDValue } - -// User returns withdraw receiver script hash from main net. -func (w Withdraw) User() util.Uint160 { return w.UserValue } - -// Amount of the withdraw assets. -func (w Withdraw) Amount() int64 { return w.AmountValue } - -// ParseWithdraw notification into withdraw structure. -func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { - var we frostfs.WithdrawEvent - if err := we.FromStackItem(e.Item); err != nil { - return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err) - } - - return Withdraw{ - IDValue: we.TxHash[:], - AmountValue: we.Amount.Int64(), - UserValue: we.User, - }, nil -} diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go deleted file mode 100644 index e382305e6..000000000 --- a/pkg/morph/event/frostfs/withdraw_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package frostfs - -import ( - "math/big" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseWithdraw(t *testing.T) { - var ( - id = util.Uint256{1, 2, 3} - user = util.Uint160{0x1, 0x2, 0x3} - - amount int64 = 10 - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseWithdraw(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong user parameter", func(t *testing.T) { - _, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong amount parameter", func(t *testing.T) { - _, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong id parameter", func(t *testing.T) { - _, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(user.BytesBE()), - stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(id[:]), - })) - - require.NoError(t, err) 
- require.Equal(t, Withdraw{ - IDValue: id[:], - AmountValue: amount, - UserValue: user, - }, ev) - }) -} diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go deleted file mode 100644 index 55a514ff1..000000000 --- a/pkg/morph/event/handlers.go +++ /dev/null @@ -1,43 +0,0 @@ -package event - -import ( - "context" - - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Handler is an Event processing function. -type Handler func(context.Context, Event) - -// BlockHandler is a chain block processing function. -type BlockHandler func(context.Context, *block.Block) - -// NotificationHandlerInfo is a structure that groups -// the parameters of the handler of particular -// contract event. -type NotificationHandlerInfo struct { - Contract util.Uint160 - Type Type - Parser NotificationParser - Handlers []Handler -} - -// NotaryHandlerInfo is a structure that groups -// the parameters of the handler of particular -// notary event. -type NotaryHandlerInfo struct { - notaryRequestTypes - - h Handler -} - -// SetHandler is an event handler setter. -func (nhi *NotaryHandlerInfo) SetHandler(v Handler) { - nhi.h = v -} - -// Handler returns an event handler. -func (nhi NotaryHandlerInfo) Handler() Handler { - return nhi.h -} diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go deleted file mode 100644 index e5cdfeef7..000000000 --- a/pkg/morph/event/listener.go +++ /dev/null @@ -1,570 +0,0 @@ -package event - -import ( - "context" - "errors" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" -) - -// Listener is an interface of smart contract notification event listener. -type Listener interface { - // Listen must start the event listener. - // - // Must listen to events with the parser installed. - Listen(context.Context) - - // ListenWithError must start the event listener. - // - // Must listen to events with the parser installed. - // - // Must send error to channel if subscriber channel has been closed or - // it could not be started. - ListenWithError(context.Context, chan<- error) - - // RegisterNotificationHandler must register the event handler for particular notification event of contract. - // - // The specified handler must be called after each capture and parsing of the event. - // - // Must ignore nil handlers. - RegisterNotificationHandler(NotificationHandlerInfo) - - // EnableNotarySupport enables notary request listening. Passed hash is - // notary mainTX signer. In practise, it means that listener will subscribe - // for only notary requests that are going to be paid with passed hash. - // - // Must not be called after Listen or ListenWithError. - EnableNotarySupport(util.Uint160, client.AlphabetKeys, BlockCounter) - - // SetNotaryParser must set the parser of particular notary request event. - // - // Parser of each event must be set once. All parsers must be set before Listen call. - // - // Must ignore nil parsers and all calls after listener has been started. 
- // - // Has no effect if EnableNotarySupport was not called before Listen or ListenWithError. - SetNotaryParser(NotaryParserInfo) - - // RegisterNotaryHandler must register the event handler for particular notification event of contract. - // - // The specified handler must be called after each capture and parsing of the event. - // - // Must ignore nil handlers. - // - // Has no effect if EnableNotarySupport was not called before Listen or ListenWithError. - RegisterNotaryHandler(NotaryHandlerInfo) - - // RegisterBlockHandler must register chain block handler. - // - // The specified handler must be called after each capture and parsing of the new block from chain. - // - // Must ignore nil handlers. - RegisterBlockHandler(BlockHandler) - - // Stop must stop the event listener. - Stop() -} - -// ListenerParams is a group of parameters -// for Listener constructor. -type ListenerParams struct { - Logger *logger.Logger - - Subscriber subscriber.Subscriber - - WorkerPoolCapacity int -} - -type listener struct { - mtx sync.RWMutex - - wg sync.WaitGroup - - startOnce, stopOnce sync.Once - - notificationParsers map[scriptHashWithType]NotificationParser - notificationHandlers map[scriptHashWithType][]Handler - - listenNotary bool - notaryEventsPreparator NotaryPreparator - notaryParsers map[notaryRequestTypes]NotaryParser - notaryHandlers map[notaryRequestTypes]Handler - notaryMainTXSigner util.Uint160 // filter for notary subscription - - log *logger.Logger - - subscriber subscriber.Subscriber - - blockHandlers []BlockHandler - - pool *ants.Pool -} - -const newListenerFailMsg = "instantiate Listener" - -var ( - errNilLogger = errors.New("nil logger") - - errNilSubscriber = errors.New("nil event subscriber") - - errNotificationSubscrConnectionTerminated = errors.New("event subscriber connection has been terminated") - - errNotarySubscrConnectionTerminated = errors.New("notary event subscriber connection has been terminated") - - errBlockNotificationChannelClosed = errors.New("new block notification channel is closed") -) - -// Listen starts the listening for events with registered handlers. -// -// Executes once, all subsequent calls do nothing. -// -// Returns an error if listener was already started. -func (l *listener) Listen(ctx context.Context) { - l.startOnce.Do(func() { - l.wg.Add(1) - defer l.wg.Done() - - l.listen(ctx, nil) - }) -} - -// ListenWithError starts the listening for events with registered handlers and -// passing error message to intError channel if subscriber channel has been closed. -// -// Executes once, all subsequent calls do nothing. -// -// Returns an error if listener was already started. -func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { - l.startOnce.Do(func() { - l.wg.Add(1) - defer l.wg.Done() - - l.listen(ctx, intError) - }) -} - -func (l *listener) listen(ctx context.Context, intError chan<- error) { - subErrCh := make(chan error) - - go l.subscribe(subErrCh) - - l.listenLoop(ctx, intError, subErrCh) -} - -func (l *listener) subscribe(errCh chan error) { - l.wg.Add(1) - defer l.wg.Done() - // create the list of listening contract hashes - hashes := make([]util.Uint160, 0) - - // fill the list with the contracts with set event parsers. 
-	l.mtx.RLock()
-	for hashType := range l.notificationParsers {
-		scHash := hashType.Hash
-
-		// prevent repetitions
-		found := false
-		for _, hash := range hashes {
-			if hash.Equals(scHash) {
-				found = true
-				break
-			}
-		}
-
-		if !found {
-			hashes = append(hashes, scHash)
-		}
-	}
-	l.mtx.RUnlock()
-
-	err := l.subscriber.SubscribeForNotification(hashes...)
-	if err != nil {
-		errCh <- fmt.Errorf("subscribe for notifications: %w", err)
-		return
-	}
-
-	if len(l.blockHandlers) > 0 {
-		if err = l.subscriber.BlockNotifications(); err != nil {
-			errCh <- fmt.Errorf("subscribe for blocks: %w", err)
-			return
-		}
-	}
-
-	if l.listenNotary {
-		if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
-			errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
-			return
-		}
-	}
-}
-
-func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error) bool {
-	if intErr == nil {
-		return false
-	}
-	// This select is required because we are reading from the error channel
-	// and closing the listener in the same routine when shutting down the node.
-	select {
-	case <-ctx.Done():
-		l.log.Info(ctx, logs.EventStopEventListenerByContext,
-			zap.String("reason", ctx.Err().Error()),
-		)
-		return false
-	case intErr <- err:
-		return true
-	}
-}
-
-func (l *listener) listenLoop(ctx context.Context, intErr chan<- error, subErrCh chan error) {
-	chs := l.subscriber.NotificationChannels()
-
-loop:
-	for {
-		select {
-		case err := <-subErrCh:
-			if !l.sendError(ctx, intErr, err) {
-				l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
-			}
-			break loop
-		case <-ctx.Done():
-			l.log.Info(ctx, logs.EventStopEventListenerByContext,
-				zap.String("reason", ctx.Err().Error()),
-			)
-			break loop
-		case notifyEvent, ok := <-chs.NotificationsCh:
-			if !ok {
-				l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
-				l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
-				break loop
-			} else if notifyEvent == nil {
-				l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
-				continue loop
-			}
-
-			l.handleNotifyEvent(ctx, notifyEvent)
-		case notaryEvent, ok := <-chs.NotaryRequestsCh:
-			if !ok {
-				l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
-				l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
-				break loop
-			} else if notaryEvent == nil {
-				l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
-				continue loop
-			}
-
-			l.handleNotaryEvent(ctx, notaryEvent)
-		case b, ok := <-chs.BlockCh:
-			if !ok {
-				l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
-				l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
-				break loop
-			} else if b == nil {
-				l.log.Warn(ctx, logs.EventNilBlockWasCaught)
-				continue loop
-			}
-
-			l.handleBlockEvent(ctx, b)
-		}
-	}
-}
-
-func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
-	if err := l.pool.Submit(func() {
-		for i := range l.blockHandlers {
-			l.blockHandlers[i](ctx, b)
-		}
-	}); err != nil {
-		l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
-			zap.Int("capacity", l.pool.Cap()))
-	}
-}
-
-func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
-	if err := l.pool.Submit(func() {
-		l.parseAndHandleNotary(ctx, notaryEvent)
-	}); err != nil {
-		l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
-			zap.Int("capacity", l.pool.Cap()))
-	}
-}
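-
-// exampleRegisterBlockHandler is an illustrative sketch added by the editor
-// and is not part of the original file. It shows the shape of a BlockHandler
-// and how it is attached before the listener is started; handlers run on the
-// listener's worker pool.
-func exampleRegisterBlockHandler(ctx context.Context, l Listener) {
-	l.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
-		// React to every new chain block here.
-		_ = b.Index
-	})
-
-	l.Listen(ctx)
-}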
-func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
-	if err := l.pool.Submit(func() {
-		l.parseAndHandleNotification(ctx, notifyEvent)
-	}); err != nil {
-		l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
-			zap.Int("capacity", l.pool.Cap()))
-	}
-}
-
-func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
-	log := l.log.With(
-		zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
-	)
-
-	// calculate event type from bytes
-	typEvent := TypeFromString(notifyEvent.Name)
-
-	log = log.With(
-		zap.String("event type", notifyEvent.Name),
-	)
-
-	// get the event parser
-	keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
-
-	l.mtx.RLock()
-	parser, ok := l.notificationParsers[keyEvent]
-	l.mtx.RUnlock()
-
-	if !ok {
-		log.Debug(ctx, logs.EventEventParserNotSet)
-
-		return
-	}
-
-	// parse the notification event
-	event, err := parser(notifyEvent)
-	if err != nil {
-		log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
-			zap.Error(err),
-		)
-
-		return
-	}
-
-	// handle the event
-	l.mtx.RLock()
-	handlers := l.notificationHandlers[keyEvent]
-	l.mtx.RUnlock()
-
-	if len(handlers) == 0 {
-		log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
-			zap.Any("event", event),
-		)
-
-		return
-	}
-
-	for _, handler := range handlers {
-		handler(ctx, event)
-	}
-}
-
-func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
-	// prepare the notary event
-	notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
-	if err != nil {
-		var expErr *ExpiredTXError
-		switch {
-		case errors.Is(err, ErrTXAlreadyHandled):
-		case errors.As(err, &expErr):
-			l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
-				zap.Error(err),
-				zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
-				zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
-			)
-		default:
-			l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
-				zap.Error(err),
-			)
-		}
-
-		return
-	}
-
-	log := l.log.With(
-		zap.String("contract", notaryEvent.ScriptHash().StringLE()),
-		zap.Stringer("method", notaryEvent.Type()),
-	)
-
-	notaryKey := notaryRequestTypes{}
-	notaryKey.SetMempoolType(nr.Type)
-	notaryKey.SetRequestType(notaryEvent.Type())
-	notaryKey.SetScriptHash(notaryEvent.ScriptHash())
-
-	// get notary parser
-	l.mtx.RLock()
-	parser, ok := l.notaryParsers[notaryKey]
-	l.mtx.RUnlock()
-
-	if !ok {
-		log.Debug(ctx, logs.EventNotaryParserNotSet)
-
-		return
-	}
-
-	// parse the notary event
-	event, err := parser(notaryEvent)
-	if err != nil {
-		log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
-			zap.Error(err),
-		)
-
-		return
-	}
-
-	// handle the event
-	l.mtx.RLock()
-	handler, ok := l.notaryHandlers[notaryKey]
-	l.mtx.RUnlock()
-
-	if !ok {
-		log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
-			zap.Any("event", event),
-		)
-
-		return
-	}
-
-	handler(ctx, event)
-}
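-
-// exampleRegisterNotificationHandler is an illustrative sketch added by the
-// editor and is not part of the original file. It mirrors the way a parser
-// and handlers are wired together; the contract hash and event type are
-// hypothetical placeholders.
-func exampleRegisterNotificationHandler(l Listener, contract util.Uint160, parser NotificationParser) {
-	l.RegisterNotificationHandler(NotificationHandlerInfo{
-		Contract: contract,
-		Type:     TypeFromString("transfer"), // hypothetical event type
-		Parser:   parser,
-		Handlers: []Handler{
-			func(ctx context.Context, e Event) {
-				// consume the parsed event here
-			},
-		},
-	})
-}
-
-// RegisterNotificationHandler registers the handler for a particular notification event of a contract.
-//
-// Ignores nil handlers.
-// Ignores handlers of events without a parser.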
-func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { - log := l.log.With( - zap.String("contract", hi.Contract.StringLE()), - zap.Stringer("event_type", hi.Type), - ) - - // check if parser was set - l.mtx.Lock() - defer l.mtx.Unlock() - - k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type} - - l.notificationParsers[k] = hi.Parser - l.notificationHandlers[k] = append( - l.notificationHandlers[k], - hi.Handlers..., - ) - - log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) -} - -// EnableNotarySupport enables notary request listening. Passed hash is -// notary mainTX signer. In practise, it means that listener will subscribe -// for only notary requests that are going to be paid with passed hash. -// -// Must not be called after Listen or ListenWithError. -func (l *listener) EnableNotarySupport(mainTXSigner util.Uint160, alphaKeys client.AlphabetKeys, bc BlockCounter) { - l.mtx.Lock() - defer l.mtx.Unlock() - - l.listenNotary = true - l.notaryMainTXSigner = mainTXSigner - l.notaryHandlers = make(map[notaryRequestTypes]Handler) - l.notaryParsers = make(map[notaryRequestTypes]NotaryParser) - l.notaryEventsPreparator = notaryPreparator( - PreparatorPrm{ - AlphaKeys: alphaKeys, - BlockCounter: bc, - }, - ) -} - -// SetNotaryParser sets the parser of particular notary request event. -// -// Ignores nil and already set parsers. -// Ignores the parser if listener is started. -func (l *listener) SetNotaryParser(pi NotaryParserInfo) { - if !l.listenNotary { - return - } - - log := l.log.With( - zap.Stringer("mempool_type", pi.GetMempoolType()), - zap.String("contract", pi.ScriptHash().StringLE()), - zap.Stringer("notary_type", pi.RequestType()), - ) - - l.mtx.Lock() - defer l.mtx.Unlock() - - // add event parser - if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok { - l.notaryParsers[pi.notaryRequestTypes] = pi.parser() - } - - log.Info(context.Background(), logs.EventRegisteredNewEventParser) -} - -// RegisterNotaryHandler registers the handler for particular notification notary request event. -// -// Ignores nil handlers. -// Ignores handlers of event without parser. -func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { - if !l.listenNotary { - return - } - - log := l.log.With( - zap.Stringer("mempool_type", hi.GetMempoolType()), - zap.String("contract", hi.ScriptHash().StringLE()), - zap.Stringer("notary type", hi.RequestType()), - ) - - // check if parser was set - l.mtx.RLock() - _, ok := l.notaryParsers[hi.notaryRequestTypes] - l.mtx.RUnlock() - - if !ok { - log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser) - return - } - - // add notary event handler - l.mtx.Lock() - l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler() - l.mtx.Unlock() - - log.Info(context.Background(), logs.EventRegisteredNewEventHandler) -} - -// Stop closes subscription channel with remote neo node. -func (l *listener) Stop() { - l.stopOnce.Do(func() { - l.subscriber.Close() - l.pool.Release() - }) - l.wg.Wait() -} - -func (l *listener) RegisterBlockHandler(handler BlockHandler) { - l.blockHandlers = append(l.blockHandlers, handler) -} - -// NewListener create the notification event listener instance and returns Listener interface. 
-func NewListener(p ListenerParams) (Listener, error) { - switch { - case p.Logger == nil: - return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilLogger) - case p.Subscriber == nil: - return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilSubscriber) - } - - // The pool here must be blocking, otherwise notifications could be dropped. - // The default capacity is 0, which means "infinite". - pool, err := ants.NewPool(p.WorkerPoolCapacity) - if err != nil { - return nil, fmt.Errorf("init worker pool: %w", err) - } - - return &listener{ - notificationParsers: make(map[scriptHashWithType]NotificationParser), - notificationHandlers: make(map[scriptHashWithType][]Handler), - log: p.Logger, - subscriber: p.Subscriber, - pool: pool, - }, nil -} diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go deleted file mode 100644 index 87f37305f..000000000 --- a/pkg/morph/event/listener_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package event - -import ( - "context" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -func TestEventHandling(t *testing.T) { - blockCh := make(chan *block.Block) - notificationCh := make(chan *state.ContainedNotificationEvent) - notaryRequestsCh := make(chan *result.NotaryRequestEvent) - - l, err := NewListener(ListenerParams{ - Logger: test.NewLogger(t), - Subscriber: &testSubscriber{ - blockCh: blockCh, - notificationCh: notificationCh, - notaryRequestsCh: notaryRequestsCh, - }, - WorkerPoolCapacity: 10, - }) - require.NoError(t, err, "failed to create listener") - - list := l.(*listener) - - blockHandled := make(chan bool) - handledBlocks := make([]*block.Block, 0) - l.RegisterBlockHandler(func(_ context.Context, b *block.Block) { - handledBlocks = append(handledBlocks, b) - blockHandled <- true - }) - - notificationHandled := make(chan bool) - handledNotifications := make([]Event, 0) - l.RegisterNotificationHandler(NotificationHandlerInfo{ - Contract: util.Uint160{100}, - Type: TypeFromString("notification type"), - Parser: func(cne *state.ContainedNotificationEvent) (Event, error) { - return testNotificationEvent{source: cne}, nil - }, - Handlers: []Handler{ - func(_ context.Context, e Event) { - handledNotifications = append(handledNotifications, e) - notificationHandled <- true - }, - }, - }) - - go list.Listen(context.Background()) - - t.Run("handles block events", func(t *testing.T) { - block := &block.Block{} - - blockCh <- block - - <-blockHandled - - require.Equal(t, 1, len(handledBlocks), "invalid handled blocks length") - require.Equal(t, block, handledBlocks[0], "invalid handled block") - }) - - t.Run("handles notifications", func(t *testing.T) { - notification := &state.ContainedNotificationEvent{ - Container: util.Uint256{49}, - NotificationEvent: state.NotificationEvent{ - ScriptHash: util.Uint160{100}, - Name: "notification type", - }, - } - - notificationCh <- notification - - <-notificationHandled - require.EqualValues(t, []Event{testNotificationEvent{source: notification}}, handledNotifications, "invalid handled notifications") - }) -} - -func TestErrorPassing(t *testing.T) { - blockCh := make(chan *block.Block) - notificationCh := make(chan *state.ContainedNotificationEvent) - 
notaryRequestsCh := make(chan *result.NotaryRequestEvent) - - t.Run("notification error", func(t *testing.T) { - nErr := fmt.Errorf("notification error") - l, err := NewListener(ListenerParams{ - Logger: test.NewLogger(t), - Subscriber: &testSubscriber{ - blockCh: blockCh, - notificationCh: notificationCh, - notaryRequestsCh: notaryRequestsCh, - - notificationErr: nErr, - }, - WorkerPoolCapacity: 10, - }) - require.NoError(t, err, "failed to create listener") - - errCh := make(chan error) - - go l.ListenWithError(context.Background(), errCh) - - err = <-errCh - - require.ErrorIs(t, err, nErr, "invalid notification error") - }) - - t.Run("block error", func(t *testing.T) { - bErr := fmt.Errorf("notification error") - l, err := NewListener(ListenerParams{ - Logger: test.NewLogger(t), - Subscriber: &testSubscriber{ - blockCh: blockCh, - notificationCh: notificationCh, - notaryRequestsCh: notaryRequestsCh, - - blockErr: bErr, - }, - WorkerPoolCapacity: 10, - }) - require.NoError(t, err, "failed to create listener") - l.RegisterBlockHandler(func(context.Context, *block.Block) {}) - - errCh := make(chan error) - - go l.ListenWithError(context.Background(), errCh) - - err = <-errCh - - require.ErrorIs(t, err, bErr, "invalid block error") - }) -} - -type testSubscriber struct { - blockCh chan *block.Block - notificationCh chan *state.ContainedNotificationEvent - notaryRequestsCh chan *result.NotaryRequestEvent - - blockErr error - notificationErr error -} - -func (s *testSubscriber) SubscribeForNotification(...util.Uint160) error { - return s.notificationErr -} -func (s *testSubscriber) UnsubscribeForNotification() {} -func (s *testSubscriber) BlockNotifications() error { - return s.blockErr -} - -func (s *testSubscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error { - return nil -} - -func (s *testSubscriber) NotificationChannels() subscriber.NotificationChannels { - return subscriber.NotificationChannels{ - BlockCh: s.blockCh, - NotificationsCh: s.notificationCh, - NotaryRequestsCh: s.notaryRequestsCh, - } -} - -func (s *testSubscriber) Close() {} - -type testNotificationEvent struct { - source *state.ContainedNotificationEvent -} - -func (e testNotificationEvent) MorphEvent() {} diff --git a/pkg/morph/event/netmap/add_peer.go b/pkg/morph/event/netmap/add_peer.go deleted file mode 100644 index 80c5559fc..000000000 --- a/pkg/morph/event/netmap/add_peer.go +++ /dev/null @@ -1,28 +0,0 @@ -package netmap - -import ( - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -type AddPeer struct { - NodeBytes []byte - - // For notary notifications only. - // Contains raw transactions of notary request. - Request *payload.P2PNotaryRequest -} - -// MorphEvent implements Neo:Morph Event interface. -func (AddPeer) MorphEvent() {} - -func (s AddPeer) Node() []byte { - return s.NodeBytes -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. 
-func (s AddPeer) NotaryRequest() *payload.P2PNotaryRequest { - return s.Request -} - -const expectedItemNumAddPeer = 1 diff --git a/pkg/morph/event/netmap/add_peer_notary.go b/pkg/morph/event/netmap/add_peer_notary.go deleted file mode 100644 index a24722a97..000000000 --- a/pkg/morph/event/netmap/add_peer_notary.go +++ /dev/null @@ -1,49 +0,0 @@ -package netmap - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func (s *AddPeer) setNode(v []byte) { - if v != nil { - s.NodeBytes = v - } -} - -const ( - // AddPeerNotaryEvent is method name for netmap `addPeer` operation - // in `Netmap` contract. Is used as identificator for notary - // peer addition requests. - AddPeerNotaryEvent = "addPeer" -) - -// ParseAddPeerNotary from NotaryEvent into netmap event structure. -func ParseAddPeerNotary(ne event.NotaryEvent) (event.Event, error) { - var ( - ev AddPeer - currentOp opcode.Opcode - ) - - fieldNum := 0 - - for _, op := range ne.Params() { - currentOp = op.Code() - - switch { - case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4: - if fieldNum == expectedItemNumAddPeer { - return nil, event.UnexpectedArgNumErr(AddPeerNotaryEvent) - } - - ev.setNode(op.Param()) - fieldNum++ - default: - return nil, event.UnexpectedOpcode(AddPeerNotaryEvent, currentOp) - } - } - - ev.Request = ne.Raw() - - return ev, nil -} diff --git a/pkg/morph/event/netmap/add_peer_test.go b/pkg/morph/event/netmap/add_peer_test.go deleted file mode 100644 index 4118bb8c8..000000000 --- a/pkg/morph/event/netmap/add_peer_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package netmap - -import ( - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go deleted file mode 100644 index 39c8f6237..000000000 --- a/pkg/morph/event/netmap/epoch.go +++ /dev/null @@ -1,47 +0,0 @@ -package netmap - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// NewEpoch is a new epoch Neo:Morph event. -type NewEpoch struct { - Num uint64 - - // Hash is used in notary environmental - // for calculating unique but same for - // all notification receivers values. - Hash util.Uint256 -} - -// MorphEvent implements Neo:Morph Event interface. -func (NewEpoch) MorphEvent() {} - -// EpochNumber returns new epoch number. -func (s NewEpoch) EpochNumber() uint64 { - return s.Num -} - -// TxHash returns hash of the TX with new epoch -// notification. -func (s NewEpoch) TxHash() util.Uint256 { - return s.Hash -} - -// ParseNewEpoch is a parser of new epoch notification event. -// -// Result is type of NewEpoch. 
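On the consumer side, a handler registered for this event receives the generic Event and asserts the concrete type produced by the parser defined just below. A minimal sketch; processEpoch is a placeholder:

func handleNewEpoch(ctx context.Context, e event.Event) {
	ne, ok := e.(netmap.NewEpoch) // concrete type returned by ParseNewEpoch
	if !ok {
		return
	}
	processEpoch(ctx, ne.EpochNumber(), ne.TxHash()) // placeholder for epoch logic
}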
-func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { - var nee netmap.NewEpochEvent - if err := nee.FromStackItem(e.Item); err != nil { - return nil, err - } - - return NewEpoch{ - Num: nee.Epoch.Uint64(), - Hash: e.Container, - }, nil -} diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go deleted file mode 100644 index 6ff692327..000000000 --- a/pkg/morph/event/netmap/epoch_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package netmap - -import ( - "math/big" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseNewEpoch(t *testing.T) { - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseNewEpoch(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong first parameter type", func(t *testing.T) { - _, err := ParseNewEpoch(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - epochNum := uint64(100) - - ev, err := ParseNewEpoch(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum)), - })) - - require.NoError(t, err) - require.Equal(t, NewEpoch{ - Num: epochNum, - }, ev) - }) -} diff --git a/pkg/morph/event/netmap/update_peer.go b/pkg/morph/event/netmap/update_peer.go deleted file mode 100644 index e29671131..000000000 --- a/pkg/morph/event/netmap/update_peer.go +++ /dev/null @@ -1,58 +0,0 @@ -package netmap - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -type UpdatePeer struct { - PubKey *keys.PublicKey - - State netmap.NodeState - - // For notary notifications only. - // Contains raw transactions of notary request. - Request *payload.P2PNotaryRequest -} - -// MorphEvent implements Neo:Morph Event interface. -func (UpdatePeer) MorphEvent() {} - -// Online returns true if node's state is requested to be switched -// to "online". -func (s UpdatePeer) Online() bool { - return s.State == netmap.NodeStateOnline -} - -// Maintenance returns true if node's state is requested to be switched -// to "maintenance". -func (s UpdatePeer) Maintenance() bool { - return s.State == netmap.NodeStateMaintenance -} - -func (s UpdatePeer) PublicKey() *keys.PublicKey { - return s.PubKey -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. 
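The state helpers above are what handlers normally branch on. A hedged sketch of such a consumer; the handler name and reaction bodies are illustrative:

func handleUpdatePeer(ctx context.Context, e event.Event) {
	up, ok := e.(netmap.UpdatePeer) // concrete type produced by the parsers below
	if !ok {
		return
	}
	switch {
	case up.Online():
		// put the node back on the network map
	case up.Maintenance():
		// mark the node as under maintenance
	default:
		// treat the node as going offline
	}
}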
-func (s UpdatePeer) NotaryRequest() *payload.P2PNotaryRequest { - return s.Request -} - -func (s *UpdatePeer) decodeState(state int64) error { - switch s.State = netmap.NodeState(state); s.State { - default: - return fmt.Errorf("unsupported node state %d", state) - case - netmap.NodeStateOffline, - netmap.NodeStateOnline, - netmap.NodeStateMaintenance: - return nil - } -} - -const expectedItemNumUpdatePeer = 2 diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go deleted file mode 100644 index 993182ab4..000000000 --- a/pkg/morph/event/netmap/update_peer_notary.go +++ /dev/null @@ -1,79 +0,0 @@ -package netmap - -import ( - "crypto/elliptic" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -var errNilPubKey = errors.New("public key is nil") - -func (s *UpdatePeer) setPublicKey(v []byte) (err error) { - if v == nil { - return errNilPubKey - } - - s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) - if err != nil { - return fmt.Errorf("parse public key: %w", err) - } - - return -} - -const ( - // UpdateStateNotaryEvent is method name for netmap state updating - // operations in `Netmap` contract. It is used as an identifier for - // notary update state requests. - UpdateStateNotaryEvent = "updateState" -) - -// ParseUpdatePeerNotary from NotaryEvent into netmap event structure. -func ParseUpdatePeerNotary(ne event.NotaryEvent) (event.Event, error) { - var ( - ev UpdatePeer - err error - - currCode opcode.Opcode - ) - - fieldNum := 0 - - for _, op := range ne.Params() { - currCode = op.Code() - - switch { - case fieldNum == 0 && opcode.PUSHDATA1 <= currCode && currCode <= opcode.PUSHDATA4: - err = ev.setPublicKey(op.Param()) - if err != nil { - return nil, err - } - - fieldNum++ - case fieldNum == 1: - state, err := event.IntFromOpcode(op) - if err != nil { - return nil, err - } - - err = ev.decodeState(state) - if err != nil { - return nil, err - } - - fieldNum++ - case fieldNum == expectedItemNumUpdatePeer: - return nil, event.UnexpectedArgNumErr(UpdateStateNotaryEvent) - default: - return nil, event.UnexpectedOpcode(UpdateStateNotaryEvent, currCode) - } - } - - ev.Request = ne.Raw() - - return ev, nil -} diff --git a/pkg/morph/event/notary.go b/pkg/morph/event/notary.go deleted file mode 100644 index bcfd4f53f..000000000 --- a/pkg/morph/event/notary.go +++ /dev/null @@ -1,56 +0,0 @@ -package event - -import ( - "fmt" - - "github.com/nspcc-dev/neo-go/pkg/network/payload" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -// NotaryType is a notary event enumeration type. -type NotaryType string - -// NotaryEvent is an interface that is -// provided by Neo:Morph notary event -// structures. -type NotaryEvent interface { - ScriptHash() util.Uint160 - Type() NotaryType - Params() []Op - - Raw() *payload.P2PNotaryRequest -} - -// Equal compares two NotaryType values and -// returns true if they are equal. -func (t NotaryType) Equal(t2 NotaryType) bool { - return string(t) == string(t2) -} - -// String returns NotaryType cast to string. -func (t NotaryType) String() string { - return string(t) -} - -// NotaryTypeFromBytes converts a bytes slice to NotaryType. -func NotaryTypeFromBytes(data []byte) NotaryType { - return NotaryType(data) -} - -// NotaryTypeFromString converts string to NotaryType.
-func NotaryTypeFromString(str string) NotaryType { - return NotaryType(str) -} - -// UnexpectedArgNumErr returns error when notary parsers -// get unexpected amount of argument in contract call. -func UnexpectedArgNumErr(method string) error { - return fmt.Errorf("unexpected arguments amount in %s call", method) -} - -// UnexpectedOpcode returns error when notary parsers -// get unexpected opcode in contract call. -func UnexpectedOpcode(method string, op opcode.Opcode) error { - return fmt.Errorf("unexpected opcode in %s call: %s", method, op) -} diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go deleted file mode 100644 index b11973646..000000000 --- a/pkg/morph/event/notary_preparator.go +++ /dev/null @@ -1,416 +0,0 @@ -package event - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/core/interop/interopnames" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/network/payload" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -var ( - errNotContractCall = errors.New("received main tx is not a contract call") - errUnexpectedWitnessAmount = errors.New("received main tx has unexpected amount of witnesses") - errUnexpectedCosignersAmount = errors.New("received main tx has unexpected amount of cosigners") - errIncorrectAlphabetSigner = errors.New("received main tx has incorrect Alphabet signer") - errIncorrectProxyWitnesses = errors.New("received main tx has non-empty Proxy witnesses") - errIncorrectInvokerWitnesses = errors.New("received main tx has empty Invoker witness") - errIncorrectAlphabet = errors.New("received main tx has incorrect Alphabet verification") - errIncorrectNotaryPlaceholder = errors.New("received main tx has incorrect Notary contract placeholder") - errIncorrectAttributesAmount = errors.New("received main tx has incorrect attributes amount") - errIncorrectAttribute = errors.New("received main tx has incorrect attribute") - errIncorrectCallFlag = errors.New("received main tx has unexpected call flag") - errIncorrectArgPacking = errors.New("received main tx has incorrect argument packing") - errUnexpectedCONVERT = errors.New("received main tx has unexpected CONVERT opcode") - - errIncorrectFBAttributesAmount = errors.New("received fallback tx has incorrect attributes amount") - errIncorrectFBAttributes = errors.New("received fallback tx has incorrect attributes") - - // ErrTXAlreadyHandled is returned if received TX has already been signed. - ErrTXAlreadyHandled = errors.New("received main tx has already been handled") -) - -// ExpiredTXError is returned if received fallback TX is already valid. -type ExpiredTXError struct { - CurrentBlockHeight uint32 - FallbackTXNotValidBeforeHeight uint32 -} - -func (e *ExpiredTXError) Error() string { - return "received main tx has expired" -} - -// BlockCounter must return block count of the network -// from which notary requests are received. -type BlockCounter interface { - BlockCount() (res uint32, err error) -} - -// PreparatorPrm groups the required parameters of the Preparator constructor. 
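Before the implementation, it may help to see the opcode stream Prepare accepts. The layout below is reconstructed from the checks that follow, not an excerpt from the source; emit.Array writes the arguments in reverse so the VM packs them in order:

// <arg N> ... <arg 1>          arguments, pushed in reverse by emit.Array
// PUSHn / PUSHINTn             number of packed arguments
// PACK                         packs the arguments into one array
// PUSHn                        call flags (at most callflag.All)
// PUSHDATA1 <method>           contract method name
// PUSHDATA1 <script hash BE>   contract script hash
// SYSCALL System.Contract.Call
// RET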
-type PreparatorPrm struct { - AlphaKeys client.AlphabetKeys - - // BlockCount must return block count of the network - // from which notary requests are received. - BlockCounter BlockCounter -} - -// Preparator implements NotaryPreparator interface. -type Preparator struct { - // contractSysCall contract call in NeoVM - contractSysCall []byte - // dummyInvocationScript is invocation script from TX that is not signed. - dummyInvocationScript []byte - - alphaKeys client.AlphabetKeys - - blockCounter BlockCounter -} - -// notaryPreparator inits and returns NotaryPreparator. -// -// Considered to be used for preparing notary request -// for parsing it by event.Listener. -func notaryPreparator(prm PreparatorPrm) NotaryPreparator { - switch { - case prm.AlphaKeys == nil: - panic("alphabet keys source must not be nil") - case prm.BlockCounter == nil: - panic("block counter must not be nil") - } - - contractSysCall := make([]byte, 4) - binary.LittleEndian.PutUint32(contractSysCall, interopnames.ToID([]byte(interopnames.SystemContractCall))) - - dummyInvocationScript := append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) - - return Preparator{ - contractSysCall: contractSysCall, - dummyInvocationScript: dummyInvocationScript, - alphaKeys: prm.AlphaKeys, - blockCounter: prm.BlockCounter, - } -} - -// Prepare converts raw notary requests to NotaryEvent. -// -// Returns ErrTXAlreadyHandled if transaction shouldn't be -// parsed and handled. It is not "error case". Every handled -// transaction is expected to be received one more time -// from the Notary service but already signed. This happens -// since every notary call is a new notary request in fact. -func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { - err := p.validateNotaryRequest(nr) - if err != nil { - return nil, err - } - - var ( - opCode opcode.Opcode - param []byte - ) - - ctx := vm.NewContext(nr.MainTransaction.Script) - ops := make([]Op, 0, 10) // 10 is maximum num of opcodes for calling contracts with 4 args(no arrays of arrays) - - for { - opCode, param, err = ctx.Next() - if err != nil { - return nil, fmt.Errorf("get next opcode in script: %w", err) - } - - if opCode == opcode.RET { - break - } - - ops = append(ops, Op{code: opCode, param: param}) - } - - opsLen := len(ops) - - // check if it is tx with contract call - if !bytes.Equal(ops[opsLen-1].param, p.contractSysCall) { - return nil, errNotContractCall - } - - // retrieve contract's script hash - contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param) - if err != nil { - return nil, fmt.Errorf("decode contract hash: %w", err) - } - - // retrieve contract's method - contractMethod := string(ops[opsLen-3].param) - - // check if there is a call flag(must be in range [0:15)) - callFlag := callflag.CallFlag(ops[opsLen-4].code - opcode.PUSH0) - if callFlag > callflag.All { - return nil, errIncorrectCallFlag - } - - args := ops[:opsLen-4] - - if len(args) != 0 { - err = p.validateParameterOpcodes(args) - if err != nil { - return nil, fmt.Errorf("validate arguments: %w", err) - } - - // without args packing opcodes - args = args[:len(args)-2] - } - - return parsedNotaryEvent{ - hash: contractHash, - notaryType: NotaryTypeFromString(contractMethod), - params: args, - raw: nr, - }, nil -} - -func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { - // notary request's main tx is expected to have - // three or four witnesses: one for proxy contract, - // one for alphabet multisignature, one optional for - // 
notary's invoker and one is for notary contract - ln := len(nr.MainTransaction.Scripts) - switch ln { - case 3, 4: - default: - return errUnexpectedWitnessAmount - } - invokerWitness := ln == 4 - - // alphabet node should handle only notary requests that do not yet have inner - // ring multisignature filled => such main TXs either have empty invocation script - // of the inner ring witness (in case if Notary Actor is used to create request) - // or have it filled with dummy bytes (if request was created manually with the old - // neo-go API) - // - // this check prevents notary flow recursion - if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && - !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version - return ErrTXAlreadyHandled - } - - currentAlphabet, err := p.alphaKeys() - if err != nil { - return fmt.Errorf("fetch Alphabet public keys: %w", err) - } - - err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet) - if err != nil { - return err - } - - // validate main TX's notary attribute - err = p.validateAttributes(nr.MainTransaction.Attributes, currentAlphabet, invokerWitness) - if err != nil { - return err - } - - // validate main TX's witnesses - err = p.validateWitnesses(nr.MainTransaction.Scripts, currentAlphabet, invokerWitness) - if err != nil { - return err - } - - // validate main TX expiration - return p.validateExpiration(nr.FallbackTransaction) -} - -func (p Preparator) validateParameterOpcodes(ops []Op) error { - l := len(ops) - - if ops[l-1].code != opcode.PACK { - return fmt.Errorf("unexpected packing opcode: %s", ops[l-1].code) - } - - argsLen, err := IntFromOpcode(ops[l-2]) - if err != nil { - return fmt.Errorf("parse argument len: %w", err) - } - - err = validateNestedArgs(argsLen, ops[:l-2]) - return err -} - -func validateNestedArgs(expArgLen int64, ops []Op) error { - var ( - currentCode opcode.Opcode - - opsLenGot = len(ops) - ) - - for i := opsLenGot - 1; i >= 0; i-- { - // only PUSH(also, PACK for arrays and CONVERT for booleans) - // codes are allowed; number of params and their content must - // be checked in a notary parser and a notary handler of a - // particular contract - switch currentCode = ops[i].code; { - case currentCode <= opcode.PUSH16: - case currentCode == opcode.CONVERT: - if i == 0 || ops[i-1].code != opcode.PUSHT && ops[i-1].code != opcode.PUSHF { - return errUnexpectedCONVERT - } - - expArgLen++ - case currentCode == opcode.PACK: - if i == 0 { - return errIncorrectArgPacking - } - - argsLen, err := IntFromOpcode(ops[i-1]) - if err != nil { - return fmt.Errorf("parse argument len: %w", err) - } - - expArgLen += argsLen + 1 - i-- - default: - return fmt.Errorf("received main tx has unexpected(not PUSH) NeoVM opcode: %s", currentCode) - } - } - - if int64(opsLenGot) != expArgLen { - return errIncorrectArgPacking - } - - return nil -} - -func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error { - if len(fbTX.Attributes) != 3 { - return errIncorrectFBAttributesAmount - } - - nvbAttrs := fbTX.GetAttributes(transaction.NotValidBeforeT) - if len(nvbAttrs) != 1 { - return errIncorrectFBAttributes - } - - nvb, ok := nvbAttrs[0].Value.(*transaction.NotValidBefore) - if !ok { - return errIncorrectFBAttributes - } - - currBlock, err := p.blockCounter.BlockCount() - if err != nil { - return fmt.Errorf("fetch current chain height: %w", err) - } - - if currBlock >= nvb.Height { - return &ExpiredTXError{ - CurrentBlockHeight: currBlock, - 
FallbackTXNotValidBeforeHeight: nvb.Height, - } - } - - return nil -} - -func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alphaKeys keys.PublicKeys) error { - if len(s) != expected { - return errUnexpectedCosignersAmount - } - - alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) - if err != nil { - return fmt.Errorf("get Alphabet verification script: %w", err) - } - - if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) { - return errIncorrectAlphabetSigner - } - - return nil -} - -func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.PublicKeys, invokerWitness bool) error { - // the first one(proxy contract) must have empty - // witnesses - if len(w[0].VerificationScript)+len(w[0].InvocationScript) != 0 { - return errIncorrectProxyWitnesses - } - - alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) - if err != nil { - return fmt.Errorf("get Alphabet verification script: %w", err) - } - - // the second one must be witness of the current - // alphabet multiaccount - if !bytes.Equal(w[1].VerificationScript, alphaVerificationScript) { - return errIncorrectAlphabet - } - - if invokerWitness { - // the optional third one must be an invoker witness - if len(w[2].VerificationScript)+len(w[2].InvocationScript) == 0 { - return errIncorrectInvokerWitnesses - } - } - - // the last one must be a placeholder for notary contract witness - last := len(w) - 1 - if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 - !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version - len(w[last].VerificationScript) != 0 { - return errIncorrectNotaryPlaceholder - } - - return nil -} - -func (p Preparator) validateAttributes(aa []transaction.Attribute, alphaKeys keys.PublicKeys, invokerWitness bool) error { - // main tx must have exactly one attribute - if len(aa) != 1 { - return errIncorrectAttributesAmount - } - - expectedN := uint8(len(alphaKeys)) - if invokerWitness { - expectedN++ - } - - val, ok := aa[0].Value.(*transaction.NotaryAssisted) - if !ok || val.NKeys != expectedN { - return errIncorrectAttribute - } - - return nil -} - -type parsedNotaryEvent struct { - hash util.Uint160 - notaryType NotaryType - params []Op - raw *payload.P2PNotaryRequest -} - -func (p parsedNotaryEvent) ScriptHash() util.Uint160 { - return p.hash -} - -func (p parsedNotaryEvent) Type() NotaryType { - return p.notaryType -} - -func (p parsedNotaryEvent) Params() []Op { - return p.params -} - -func (p parsedNotaryEvent) Raw() *payload.P2PNotaryRequest { - return p.raw -} diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go deleted file mode 100644 index 60ddb4601..000000000 --- a/pkg/morph/event/notary_preparator_test.go +++ /dev/null @@ -1,569 +0,0 @@ -package event - -import ( - "fmt" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/vm" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/core/interop/interopnames" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/network/payload" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" - 
"github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/stretchr/testify/require" -) - -var ( - alphaKeys keys.PublicKeys - wrongAlphaKeys keys.PublicKeys - - dummyAlphabetInvocationScript []byte - dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually - wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...) - - scriptHash util.Uint160 -) - -func init() { - privat, _ := keys.NewPrivateKey() - pub := privat.PublicKey() - - alphaKeys = keys.PublicKeys{pub} - - wrongPrivat, _ := keys.NewPrivateKey() - wrongPub := wrongPrivat.PublicKey() - - wrongAlphaKeys = keys.PublicKeys{wrongPub} - - scriptHash, _ = util.Uint160DecodeStringLE("21fce15191428e9c2f0e8d0329ff6d3dd14882de") -} - -type blockCounter struct { - epoch uint32 - err error -} - -func (b blockCounter) BlockCount() (res uint32, err error) { - return b.epoch, b.err -} - -func TestPrepare_IncorrectScript(t *testing.T) { - preparator := notaryPreparator( - PreparatorPrm{ - alphaKeysSource(), - blockCounter{100, nil}, - }, - ) - - for _, dummyMultisig := range []bool{true, false} { // try both empty and dummy multisig/Notary invocation witness script - t.Run(fmt.Sprintf("not contract call, compat: %t", dummyMultisig), func(t *testing.T) { - bw := io.NewBufBinWriter() - - emit.Int(bw.BinWriter, 4) - emit.String(bw.BinWriter, "test") - emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) - emit.Syscall(bw.BinWriter, interopnames.SystemContractCallNative) // any != interopnames.SystemContractCall - - nr := correctNR(bw.Bytes(), dummyMultisig, false) - - _, err := preparator.Prepare(nr) - - require.EqualError(t, err, errNotContractCall.Error()) - }) - - t.Run(fmt.Sprintf("incorrect, compat: %t", dummyMultisig), func(t *testing.T) { - bw := io.NewBufBinWriter() - - emit.Int(bw.BinWriter, -1) - emit.String(bw.BinWriter, "test") - emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) - emit.Syscall(bw.BinWriter, interopnames.SystemContractCall) - - nr := correctNR(bw.Bytes(), dummyMultisig, false) - - _, err := preparator.Prepare(nr) - - require.EqualError(t, err, errIncorrectCallFlag.Error()) - }) - } -} - -func TestPrepare_IncorrectNR(t *testing.T) { - type ( - mTX struct { - sigs []transaction.Signer - scripts []transaction.Witness - attrs []transaction.Attribute - } - fbTX struct { - attrs []transaction.Attribute - } - ) - - setIncorrectFields := func(nr payload.P2PNotaryRequest, m mTX, f fbTX) payload.P2PNotaryRequest { - if m.sigs != nil { - nr.MainTransaction.Signers = m.sigs - } - - if m.scripts != nil { - nr.MainTransaction.Scripts = m.scripts - } - - if m.attrs != nil { - nr.MainTransaction.Attributes = m.attrs - } - - if f.attrs != nil { - nr.FallbackTransaction.Attributes = f.attrs - } - - return nr - } - - alphaVerificationScript, _ := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) - wrongAlphaVerificationScript, _ := smartcontract.CreateMultiSigRedeemScript(len(wrongAlphaKeys)*2/3+1, wrongAlphaKeys) - - tests := []struct { - name string - addW bool // additional witness for non alphabet invocations - mTX mTX - fbTX fbTX - expErr error - }{ - { - name: "incorrect witness amount", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{{}}, - }, - expErr: errUnexpectedWitnessAmount, - }, - { - name: "not dummy invocation script", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - 
InvocationScript: wrongDummyInvocationScript, - }, - {}, - }, - }, - expErr: ErrTXAlreadyHandled, - }, - { - name: "incorrect main TX signers amount", - addW: false, - mTX: mTX{ - sigs: []transaction.Signer{{}}, - }, - expErr: errUnexpectedCosignersAmount, - }, - { - name: "incorrect main TX Alphabet signer", - addW: false, - mTX: mTX{ - sigs: []transaction.Signer{ - {}, - { - Account: hash.Hash160(wrongAlphaVerificationScript), - }, - {}, - }, - }, - expErr: errIncorrectAlphabetSigner, - }, - { - name: "incorrect main TX attribute amount", - addW: false, - mTX: mTX{ - attrs: []transaction.Attribute{{}, {}}, - }, - expErr: errIncorrectAttributesAmount, - }, - { - name: "incorrect main TX attribute", - addW: false, - mTX: mTX{ - attrs: []transaction.Attribute{ - { - Value: &transaction.NotaryAssisted{ - NKeys: uint8(len(alphaKeys) + 1), - }, - }, - }, - }, - expErr: errIncorrectAttribute, - }, - { - name: "incorrect main TX proxy witness", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - { - InvocationScript: make([]byte, 1), - }, - { - InvocationScript: dummyAlphabetInvocationScript, - }, - {}, - }, - }, - expErr: errIncorrectProxyWitnesses, - }, - { - name: "incorrect main TX proxy witness compat", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - { - InvocationScript: make([]byte, 1), - }, - { - InvocationScript: dummyAlphabetInvocationScriptOld, - }, - {}, - }, - }, - expErr: errIncorrectProxyWitnesses, - }, - { - name: "incorrect main TX Alphabet witness", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: wrongAlphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScript, - }, - {}, - }, - }, - expErr: errIncorrectAlphabet, - }, - { - name: "incorrect main TX Alphabet witness compat", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: wrongAlphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScriptOld, - }, - {}, - }, - }, - expErr: errIncorrectAlphabet, - }, - { - name: "incorrect main TX Notary witness", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: alphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScript, - }, - { - InvocationScript: wrongDummyInvocationScript, - }, - }, - }, - expErr: errIncorrectNotaryPlaceholder, - }, - { - name: "incorrect main TX Notary witness compat", - addW: false, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: alphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScriptOld, - }, - { - InvocationScript: wrongDummyInvocationScript, - }, - }, - }, - expErr: errIncorrectNotaryPlaceholder, - }, - { - name: "incorrect fb TX attributes amount", - addW: false, - fbTX: fbTX{ - attrs: []transaction.Attribute{{}}, - }, - expErr: errIncorrectFBAttributesAmount, - }, - { - name: "incorrect fb TX attributes", - addW: false, - fbTX: fbTX{ - attrs: []transaction.Attribute{{}, {}, {}}, - }, - expErr: errIncorrectFBAttributes, - }, - { - name: "expired fb TX", - addW: false, - fbTX: fbTX{ - []transaction.Attribute{ - {}, - { - Type: transaction.NotValidBeforeT, - Value: &transaction.NotValidBefore{ - Height: 1, - }, - }, - {}, - }, - }, - expErr: &ExpiredTXError{}, - }, - { - name: "incorrect invoker TX Alphabet witness", - addW: true, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: alphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScript, - }, - {}, - {}, - }, - }, - expErr: 
errIncorrectInvokerWitnesses, - }, - { - name: "incorrect invoker TX Alphabet witness compat", - addW: true, - mTX: mTX{ - scripts: []transaction.Witness{ - {}, - { - VerificationScript: alphaVerificationScript, - InvocationScript: dummyAlphabetInvocationScriptOld, - }, - {}, - {}, - }, - }, - expErr: errIncorrectInvokerWitnesses, - }, - { - name: "incorrect main TX attribute with invoker", - addW: true, - mTX: mTX{ - attrs: []transaction.Attribute{ - { - Value: &transaction.NotaryAssisted{ - NKeys: uint8(len(alphaKeys) + 2), - }, - }, - }, - }, - expErr: errIncorrectAttribute, - }, - } - - preparator := notaryPreparator( - PreparatorPrm{ - alphaKeysSource(), - blockCounter{100, nil}, - }, - ) - - var ( - incorrectNR payload.P2PNotaryRequest - err error - ) - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - correctNR := correctNR(nil, false, test.addW) - incorrectNR = setIncorrectFields(*correctNR, test.mTX, test.fbTX) - - _, err = preparator.Prepare(&incorrectNR) - - require.EqualError(t, err, test.expErr.Error()) - }) - } -} - -func TestPrepare_CorrectNR(t *testing.T) { - tests := []struct { - hash util.Uint160 - method string - args []any - }{ - { - scriptHash, - "test1", - nil, - }, - { - scriptHash, - "test2", - []any{ - int64(4), - "test", - []any{ - int64(4), - false, - true, - }, - }, - }, - } - - preparator := notaryPreparator( - PreparatorPrm{ - alphaKeysSource(), - blockCounter{100, nil}, - }, - ) - - for _, test := range tests { - for i := range 1 { // run tests against 3 and 4 witness NR - for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness - additionalWitness := i == 0 - nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness) - - event, err := preparator.Prepare(nr) - - require.NoError(t, err) - require.Equal(t, test.method, event.Type().String()) - require.Equal(t, test.hash.StringLE(), event.ScriptHash().StringLE()) - - // check args parsing - bw := io.NewBufBinWriter() - emit.Array(bw.BinWriter, test.args...) - - ctx := vm.NewContext(bw.Bytes()) - - opCode, param, err := ctx.Next() - require.NoError(t, err) - - for _, opGot := range event.Params() { - require.Equal(t, opCode, opGot.code) - require.Equal(t, param, opGot.param) - - opCode, param, err = ctx.Next() - require.NoError(t, err) - } - - _, _, err = ctx.Next() // PACK opcode - require.NoError(t, err) - _, _, err = ctx.Next() // packing len opcode - require.NoError(t, err) - - opCode, _, err = ctx.Next() - require.NoError(t, err) - require.Equal(t, opcode.RET, opCode) - } - } - } -} - -func alphaKeysSource() client.AlphabetKeys { - return func() (keys.PublicKeys, error) { - return alphaKeys, nil - } -} - -func script(hash util.Uint160, method string, args ...any) []byte { - bw := io.NewBufBinWriter() - - if len(args) > 0 { - emit.AppCall(bw.BinWriter, hash, method, callflag.All, args) - } else { - emit.AppCallNoArgs(bw.BinWriter, hash, method, callflag.All) - } - - return bw.Bytes() -} - -func correctNR(script []byte, dummyMultisig, additionalWitness bool) *payload.P2PNotaryRequest { - alphaVerificationScript, _ := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) - - signers := []transaction.Signer{ - {}, - { - Account: hash.Hash160(alphaVerificationScript), - }, - {}, - } - if additionalWitness { // insert on element with index 2 - signers = append(signers[:2+1], signers[2:]...) 
- signers[2] = transaction.Signer{Account: hash.Hash160(alphaVerificationScript)} - } - - multisigInv := dummyAlphabetInvocationScript - if dummyMultisig { - multisigInv = dummyAlphabetInvocationScriptOld - } - scripts := []transaction.Witness{ - {}, - { - InvocationScript: multisigInv, - VerificationScript: alphaVerificationScript, - }, - { - InvocationScript: multisigInv, - }, - } - if additionalWitness { // insert on element with index 2 - scripts = append(scripts[:2+1], scripts[2:]...) - scripts[2] = transaction.Witness{ - InvocationScript: multisigInv, - VerificationScript: alphaVerificationScript, - } - } - - nKeys := uint8(len(alphaKeys)) - if additionalWitness { - nKeys++ - } - - return &payload.P2PNotaryRequest{ - MainTransaction: &transaction.Transaction{ - Signers: signers, - Scripts: scripts, - Attributes: []transaction.Attribute{ - { - Value: &transaction.NotaryAssisted{ - NKeys: nKeys, - }, - }, - }, - Script: script, - }, - FallbackTransaction: &transaction.Transaction{ - Attributes: []transaction.Attribute{ - {}, - { - Type: transaction.NotValidBeforeT, - Value: &transaction.NotValidBefore{ - Height: 1000, - }, - }, - {}, - }, - }, - } -} diff --git a/pkg/morph/event/notification.go b/pkg/morph/event/notification.go deleted file mode 100644 index d614844ce..000000000 --- a/pkg/morph/event/notification.go +++ /dev/null @@ -1,31 +0,0 @@ -package event - -// Type is a notification event enumeration type. -type Type string - -// Event is an interface that is -// provided by Neo:Morph event structures. -type Event interface { - MorphEvent() -} - -// Equal compares two Type values and -// returns true if they are equal. -func (t Type) Equal(t2 Type) bool { - return string(t) == string(t2) -} - -// String returns casted to string Type. -func (t Type) String() string { - return string(t) -} - -// TypeFromBytes converts bytes slice to Type. -func TypeFromBytes(data []byte) Type { - return Type(data) -} - -// TypeFromString converts string to Type. -func TypeFromString(str string) Type { - return Type(str) -} diff --git a/pkg/morph/event/opcodes.go b/pkg/morph/event/opcodes.go deleted file mode 100644 index 3385c2eac..000000000 --- a/pkg/morph/event/opcodes.go +++ /dev/null @@ -1,59 +0,0 @@ -package event - -import ( - "fmt" - - "github.com/nspcc-dev/neo-go/pkg/encoding/bigint" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -// Op is wrapper over Neo VM's opcode -// and its parameter. -type Op struct { - code opcode.Opcode - param []byte -} - -// Code returns Neo VM opcode. -func (o Op) Code() opcode.Opcode { - return o.code -} - -// Param returns parameter of wrapped -// Neo VM opcode. -func (o Op) Param() []byte { - return o.param -} - -// Below are the functions which reverse the results of github.com/nspcc-dev/neo-go/pkg/vm/emit.Array function. - -// BytesFromOpcode tries to retrieve bytes from Op. -func BytesFromOpcode(op Op) ([]byte, error) { - switch code := op.Code(); code { - case opcode.PUSHDATA1, opcode.PUSHDATA2, opcode.PUSHDATA4: - return op.Param(), nil - default: - return nil, fmt.Errorf("unexpected ByteArray opcode %s", code) - } -} - -// IntFromOpcode tries to retrieve int from Op. 
-func IntFromOpcode(op Op) (int64, error) { - switch code := op.Code(); { - case code == opcode.PUSHM1: - return -1, nil - case code >= opcode.PUSH0 && code <= opcode.PUSH16: - return int64(code - opcode.PUSH0), nil - case code <= opcode.PUSHINT256: - return bigint.FromBytes(op.Param()).Int64(), nil - default: - return 0, fmt.Errorf("unexpected INT opcode %s", code) - } -} - -// StringFromOpcode tries to retrieve string from Op. -func StringFromOpcode(op Op) (string, error) { - // strings are emitted like bytes - data, err := BytesFromOpcode(op) - return string(data), err -} diff --git a/pkg/morph/event/opcodes_test.go b/pkg/morph/event/opcodes_test.go deleted file mode 100644 index fa8673778..000000000 --- a/pkg/morph/event/opcodes_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package event - -import ( - "testing" - - "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/vm" - "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/stretchr/testify/require" -) - -func TestBytesFromOpcode(t *testing.T) { - tests := [...][]byte{ - []byte("test"), - []byte("test test"), - []byte(""), - []byte("1"), - } - - bw := io.NewBufBinWriter() - - for _, test := range tests { - emit.Bytes(bw.BinWriter, test) - } - - var ( - ctx = vm.NewContext(bw.Bytes()) - - op Op - - gotBytes []byte - err error - ) - - for _, test := range tests { - op = getNextOp(ctx) - - gotBytes, err = BytesFromOpcode(op) - - require.NoError(t, err) - require.Equal(t, test, gotBytes) - } -} - -func TestIntFromOpcode(t *testing.T) { - tests := [...]int64{ - -1, - -5, - 15, - 16, - 1_000_000, - } - - bw := io.NewBufBinWriter() - - for _, test := range tests { - emit.Int(bw.BinWriter, test) - } - - var ( - ctx = vm.NewContext(bw.Bytes()) - - op Op - - gotInt int64 - err error - ) - - for _, test := range tests { - op = getNextOp(ctx) - - gotInt, err = IntFromOpcode(op) - - require.NoError(t, err) - require.Equal(t, test, gotInt) - } -} - -func getNextOp(ctx *vm.Context) (op Op) { - op.code, op.param, _ = ctx.Next() - return -} diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go deleted file mode 100644 index 5adeb4b30..000000000 --- a/pkg/morph/event/parsers.go +++ /dev/null @@ -1,55 +0,0 @@ -package event - -import ( - "fmt" - - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// NotificationParser is a function that constructs Event -// from the StackItem list. -type NotificationParser func(*state.ContainedNotificationEvent) (Event, error) - -// NotaryPreparator constructs NotaryEvent -// from the NotaryRequest event. -type NotaryPreparator interface { - Prepare(*payload.P2PNotaryRequest) (NotaryEvent, error) -} - -// NotaryParser is a function that constructs Event -// from the NotaryEvent event. -type NotaryParser func(NotaryEvent) (Event, error) - -// NotaryParserInfo is a structure that groups -// the parameters of particular notary request -// event parser. -type NotaryParserInfo struct { - notaryRequestTypes - - p NotaryParser -} - -func (n *NotaryParserInfo) parser() NotaryParser { - return n.p -} - -func (n *NotaryParserInfo) SetParser(p NotaryParser) { - n.p = p -} - -type wrongPrmNumber struct { - exp, act int -} - -// WrongNumberOfParameters returns an error about wrong number of smart contract parameters. 
-func WrongNumberOfParameters(exp, act int) error { - return &wrongPrmNumber{ - exp: exp, - act: act, - } -} - -func (s wrongPrmNumber) Error() string { - return fmt.Errorf("wrong parameter count: expected %d, has %d", s.exp, s.act).Error() -} diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go deleted file mode 100644 index b384e436b..000000000 --- a/pkg/morph/event/rolemanagement/designate.go +++ /dev/null @@ -1,45 +0,0 @@ -package rolemanagement - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Designate represents designation event of the mainnet RoleManagement contract. -type Designate struct { - Role noderoles.Role - - // TxHash is used in notary environmental - // for calculating unique but same for - // all notification receivers values. - TxHash util.Uint256 -} - -// MorphEvent implements Neo:Morph Event interface. -func (Designate) MorphEvent() {} - -// ParseDesignate from notification into container event structure. -func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack items from notify event: %w", err) - } - - if len(params) != 2 { - return nil, event.WrongNumberOfParameters(2, len(params)) - } - - bi, err := params[0].TryInteger() - if err != nil { - return nil, fmt.Errorf("invalid stackitem type: %w", err) - } - - return Designate{ - Role: noderoles.Role(bi.Int64()), - TxHash: e.Container, - }, nil -} diff --git a/pkg/morph/event/rolemanagement/designate_test.go b/pkg/morph/event/rolemanagement/designate_test.go deleted file mode 100644 index c5238b0bc..000000000 --- a/pkg/morph/event/rolemanagement/designate_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package rolemanagement - -import ( - "testing" - - "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseRoleUpdate(t *testing.T) { - t.Run("wrong number of arguments", func(t *testing.T) { - _, err := ParseDesignate(createNotifyEventFromItems([]stackitem.Item{})) - require.Error(t, err) - }) - t.Run("invalid item type", func(t *testing.T) { - args := []stackitem.Item{stackitem.NewMap(), stackitem.Make(123)} - _, err := ParseDesignate(createNotifyEventFromItems(args)) - require.Error(t, err) - }) - t.Run("good", func(t *testing.T) { - args := []stackitem.Item{stackitem.Make(int(noderoles.NeoFSAlphabet)), stackitem.Make(123)} - e, err := ParseDesignate(createNotifyEventFromItems(args)) - require.NoError(t, err) - require.Equal(t, noderoles.NeoFSAlphabet, e.(Designate).Role) - }) -} - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go deleted file mode 100644 index 0088be400..000000000 --- a/pkg/morph/event/utils.go +++ /dev/null @@ -1,99 +0,0 @@ -package event - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - util2 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "go.uber.org/zap" -) - -type scriptHashValue struct { - hash util.Uint160 -} - -type scriptHashWithType struct { - Hash util.Uint160 - Type Type -} - -type notaryRequestTypes struct { - notaryRequestMempoolType - notaryRequestType - scriptHashValue -} - -type notaryRequestMempoolType struct { - mempoolTyp mempoolevent.Type -} - -type notaryRequestType struct { - notaryType NotaryType -} - -// GetMempoolType is a notary request mempool type getter. -func (n notaryRequestMempoolType) GetMempoolType() mempoolevent.Type { - return n.mempoolTyp -} - -// SetMempoolType is a notary request mempool type setter. -func (n *notaryRequestMempoolType) SetMempoolType(typ mempoolevent.Type) { - n.mempoolTyp = typ -} - -// RequestType is a notary request type getter. -func (n notaryRequestType) RequestType() NotaryType { - return n.notaryType -} - -// SetRequestType is a notary request type setter. -func (n *notaryRequestType) SetRequestType(typ NotaryType) { - n.notaryType = typ -} - -// SetScriptHash is a script hash setter. -func (s *scriptHashValue) SetScriptHash(v util.Uint160) { - s.hash = v -} - -// ScriptHash is a script hash getter. -func (s scriptHashValue) ScriptHash() util.Uint160 { - return s.hash -} - -// WorkerPoolHandler sets closure over worker pool w with passed handler h. -func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler { - return func(ctx context.Context, e Event) { - err := w.Submit(func() { - h(ctx, e) - }) - if err != nil { - log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool, - zap.Error(err), - ) - } - } -} - -var errEmptyStackArray = errors.New("stack item array is empty") - -// ParseStackArray parses stack array from raw notification -// event received from neo-go RPC node. 
-func ParseStackArray(event *state.ContainedNotificationEvent) ([]stackitem.Item, error) { - arr, err := client.ArrayFromStackItem(event.Item) - if err != nil { - return nil, fmt.Errorf("stack item is not an array type: %w", err) - } else if len(arr) == 0 { - return nil, errEmptyStackArray - } - - return arr, nil -} diff --git a/pkg/morph/event/utils_test.go b/pkg/morph/event/utils_test.go deleted file mode 100644 index 83facc653..000000000 --- a/pkg/morph/event/utils_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package event - -import ( - "math/big" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseStackArray(t *testing.T) { - t.Run("success", func(t *testing.T) { - arr := &stackitem.Array{} - arr.Append(stackitem.NewBigInteger(big.NewInt(1))) - arr.Append(stackitem.NewBigInteger(big.NewInt(2))) - ev := &state.ContainedNotificationEvent{ - Container: util.Uint256{67}, - NotificationEvent: state.NotificationEvent{ - ScriptHash: util.Uint160{69}, - Name: "name", - Item: arr, - }, - } - - items, err := ParseStackArray(ev) - require.NoError(t, err, "failed to parse event items") - require.Equal(t, 2, len(items), "invalid length") - require.Equal(t, stackitem.NewBigInteger(big.NewInt(1)), items[0], "invalid item 0") - require.Equal(t, stackitem.NewBigInteger(big.NewInt(2)), items[1], "invalid item 0") - }) - t.Run("empty stack error", func(t *testing.T) { - arr := &stackitem.Array{} - ev := &state.ContainedNotificationEvent{ - Container: util.Uint256{67}, - NotificationEvent: state.NotificationEvent{ - ScriptHash: util.Uint160{69}, - Name: "name", - Item: arr, - }, - } - - items, err := ParseStackArray(ev) - require.ErrorIs(t, err, errEmptyStackArray, "invalid empty array error") - require.Equal(t, 0, len(items), "items was returned") - }) -} diff --git a/pkg/morph/metrics/metrics.go b/pkg/morph/metrics/metrics.go deleted file mode 100644 index 5d74b054d..000000000 --- a/pkg/morph/metrics/metrics.go +++ /dev/null @@ -1,21 +0,0 @@ -package metrics - -import "time" - -type Register interface { - IncSwitchCount() - SetLastBlock(uint32) - IncNotificationCount(notificationType string) - ObserveInvoke(typ string, contract string, method string, success bool, d time.Duration) -} - -type NoopRegister struct{} - -func (NoopRegister) IncSwitchCount() {} -func (NoopRegister) SetLastBlock(uint32) {} -func (NoopRegister) IncNotificationCount(string) {} -func (NoopRegister) ObserveInvoke(string, string, string, bool, time.Duration) {} - -type NoopMorphCacheMetrics struct{} - -func (m *NoopMorphCacheMetrics) AddMethodDuration(string, bool, time.Duration) {} diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go deleted file mode 100644 index 4ef59ed6a..000000000 --- a/pkg/morph/subscriber/subscriber.go +++ /dev/null @@ -1,352 +0,0 @@ -package subscriber - -import ( - "context" - "errors" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -type ( - NotificationChannels struct { - BlockCh <-chan *block.Block - NotificationsCh <-chan *state.ContainedNotificationEvent - 
NotaryRequestsCh <-chan *result.NotaryRequestEvent - } - - // Subscriber is an interface of the NotificationEvent listener. - Subscriber interface { - SubscribeForNotification(...util.Uint160) error - BlockNotifications() error - SubscribeForNotaryRequests(mainTXSigner util.Uint160) error - - NotificationChannels() NotificationChannels - - Close() - } - - subChannels struct { - NotifyChan chan *state.ContainedNotificationEvent - BlockChan chan *block.Block - NotaryChan chan *result.NotaryRequestEvent - } - - subscriber struct { - sync.RWMutex - log *logger.Logger - client *client.Client - - notifyChan chan *state.ContainedNotificationEvent - blockChan chan *block.Block - notaryChan chan *result.NotaryRequestEvent - - current subChannels - - // cached subscription information - subscribedEvents map[util.Uint160]bool - subscribedNotaryEvents map[util.Uint160]bool - subscribedToNewBlocks bool - } - - // Params is a group of Subscriber constructor parameters. - Params struct { - Log *logger.Logger - StartFromBlock uint32 - Client *client.Client - } -) - -func (s *subscriber) NotificationChannels() NotificationChannels { - return NotificationChannels{ - BlockCh: s.blockChan, - NotificationsCh: s.notifyChan, - NotaryRequestsCh: s.notaryChan, - } -} - -var ( - errNilParams = errors.New("chain/subscriber: config was not provided to the constructor") - - errNilLogger = errors.New("chain/subscriber: logger was not provided to the constructor") - - errNilClient = errors.New("chain/subscriber: client was not provided to the constructor") -) - -func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error { - s.Lock() - defer s.Unlock() - - notifyIDs := make([]string, 0, len(contracts)) - - for i := range contracts { - if s.subscribedEvents[contracts[i]] { - continue - } - // subscribe to contract notifications - id, err := s.client.ReceiveExecutionNotifications(contracts[i], s.current.NotifyChan) - if err != nil { - // if there is some error, undo all subscriptions and return error - for _, id := range notifyIDs { - _ = s.client.Unsubscribe(id) - } - - return err - } - - // save notification id - notifyIDs = append(notifyIDs, id) - } - for i := range contracts { - s.subscribedEvents[contracts[i]] = true - } - - return nil -} - -func (s *subscriber) Close() { - s.client.Close() -} - -func (s *subscriber) BlockNotifications() error { - s.Lock() - defer s.Unlock() - if s.subscribedToNewBlocks { - return nil - } - if _, err := s.client.ReceiveBlocks(s.current.BlockChan); err != nil { - return fmt.Errorf("could not subscribe for new block events: %w", err) - } - - s.subscribedToNewBlocks = true - - return nil -} - -func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error { - s.Lock() - defer s.Unlock() - if s.subscribedNotaryEvents[mainTXSigner] { - return nil - } - if _, err := s.client.ReceiveNotaryRequests(mainTXSigner, s.current.NotaryChan); err != nil { - return fmt.Errorf("could not subscribe for notary request events: %w", err) - } - - s.subscribedNotaryEvents[mainTXSigner] = true - return nil -} - -// New is a constructs Neo:Morph event listener and returns Subscriber interface. 
-func New(ctx context.Context, p *Params) (Subscriber, error) { - switch { - case p == nil: - return nil, errNilParams - case p.Log == nil: - return nil, errNilLogger - case p.Client == nil: - return nil, errNilClient - } - - err := awaitHeight(p.Client, p.StartFromBlock) - if err != nil { - return nil, err - } - - sub := &subscriber{ - log: p.Log, - client: p.Client, - notifyChan: make(chan *state.ContainedNotificationEvent), - blockChan: make(chan *block.Block), - notaryChan: make(chan *result.NotaryRequestEvent), - - current: newSubChannels(), - - subscribedEvents: make(map[util.Uint160]bool), - subscribedNotaryEvents: make(map[util.Uint160]bool), - } - // Worker listens all events from temporary NeoGo channel and puts them - // into corresponding permanent channels. - go sub.routeNotifications(ctx) - - return sub, nil -} - -func (s *subscriber) routeNotifications(ctx context.Context) { - var ( - restoreCh = make(chan bool) - restoreInProgress bool - ) - -routeloop: - for { - var connLost bool - s.RLock() - curr := s.current - s.RUnlock() - select { - case <-ctx.Done(): - break routeloop - case ev, ok := <-curr.NotifyChan: - if ok { - s.client.Metrics().IncNotificationCount("notify") - s.notifyChan <- ev - } else { - connLost = true - } - case ev, ok := <-curr.BlockChan: - if ok { - s.client.Metrics().IncNotificationCount("block") - s.client.Metrics().SetLastBlock(ev.Index) - s.blockChan <- ev - } else { - connLost = true - } - case ev, ok := <-curr.NotaryChan: - if ok { - s.client.Metrics().IncNotificationCount("notary") - s.notaryChan <- ev - } else { - connLost = true - } - case ok := <-restoreCh: - restoreInProgress = false - if !ok { - connLost = true - } - } - if connLost { - if !restoreInProgress { - restoreInProgress = s.switchEndpoint(ctx, restoreCh) - if !restoreInProgress { - break routeloop - } - curr.drain() - } else { // Avoid getting additional !ok events. - s.Lock() - s.current.NotifyChan = nil - s.current.BlockChan = nil - s.current.NotaryChan = nil - s.Unlock() - } - } - } - close(s.notifyChan) - close(s.blockChan) - close(s.notaryChan) -} - -func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool { - s.log.Info(ctx, logs.RPConnectionLost) - if !s.client.SwitchRPC(ctx) { - s.log.Error(ctx, logs.RPCNodeSwitchFailure) - return false - } - - s.Lock() - chs := newSubChannels() - go func() { - finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan) - }() - s.current = chs - s.Unlock() - - s.client.Metrics().IncSwitchCount() - return true -} - -func newSubChannels() subChannels { - return subChannels{ - NotifyChan: make(chan *state.ContainedNotificationEvent), - BlockChan: make(chan *block.Block), - NotaryChan: make(chan *result.NotaryRequestEvent), - } -} - -func (s *subChannels) drain() { -drainloop: - for { - select { - case _, ok := <-s.NotifyChan: - if !ok { - s.NotifyChan = nil - } - case _, ok := <-s.BlockChan: - if !ok { - s.BlockChan = nil - } - case _, ok := <-s.NotaryChan: - if !ok { - s.NotaryChan = nil - } - default: - break drainloop - } - } -} - -// restoreSubscriptions restores subscriptions according to -// cached information about them. 
-func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
-	blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
-) bool {
-	var err error
-
-	// new block events restoration
-	if s.subscribedToNewBlocks {
-		_, err = s.client.ReceiveBlocks(blCh)
-		if err != nil {
-			s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
-			return false
-		}
-	}
-
-	// notification events restoration
-	for contract := range s.subscribedEvents {
-		_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
-		if err != nil {
-			s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
-			return false
-		}
-	}
-
-	// notary notification events restoration
-	for signer := range s.subscribedNotaryEvents {
-		_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
-		if err != nil {
-			s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
-			return false
-		}
-	}
-	return true
-}
-
-// awaitHeight checks that the remote client has reached at least the expected
-// block height and returns an error if it has not.
-// This function is required to avoid connections to unsynced RPC nodes, because
-// they can produce events from the past that should not be processed by
-// FrostFS nodes.
-func awaitHeight(cli *client.Client, startFrom uint32) error {
-	if startFrom == 0 {
-		return nil
-	}
-
-	height, err := cli.BlockCount()
-	if err != nil {
-		return fmt.Errorf("could not get block height: %w", err)
-	}
-
-	if height < startFrom {
-		return fmt.Errorf("RPC block counter %d didn't reach expected height %d", height, startFrom)
-	}
-
-	return nil
-}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
deleted file mode 100644
index 974be1120..000000000
--- a/pkg/morph/timer/block.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package timer
-
-import (
-	"sync"
-)
-
-// BlockMeter calculates block time interval dynamically.
-type BlockMeter func() (uint32, error)
-
-// BlockTickHandler is a callback of a certain block advance.
-type BlockTickHandler func()
-
-// BlockTimer represents block timer.
-//
-// It can tick the blocks and perform certain actions
-// on block time intervals.
-type BlockTimer struct {
-	mtx sync.Mutex
-
-	dur BlockMeter
-
-	baseDur uint32
-
-	cur, tgt uint32
-
-	last uint32
-
-	h BlockTickHandler
-
-	once bool
-}
-
-// StaticBlockMeter returns a BlockMeter that always returns (d, nil).
-func StaticBlockMeter(d uint32) BlockMeter {
-	return func() (uint32, error) {
-		return d, nil
-	}
-}
-
-// NewBlockTimer creates a new BlockTimer.
-//
-// Reset should be called before timer ticking.
-func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
-	return &BlockTimer{
-		dur: dur,
-		h:   h,
-	}
-}
-
-// NewOneTickTimer creates a new BlockTimer that ticks only once.
-func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
-	return &BlockTimer{
-		dur:  dur,
-		h:    h,
-		once: true,
-	}
-}
-
-// Reset resets previous ticks of the BlockTimer.
-//
-// Returns BlockMeter's error upon occurrence.
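// Editor's note: a short illustrative sketch of the intended call pattern,
// assuming some source of new block heights; epochDuration, onEpoch and
// heights are hypothetical placeholders.
//
//	bt := timer.NewBlockTimer(timer.StaticBlockMeter(epochDuration), onEpoch)
//	if err := bt.Reset(); err != nil { // Reset must precede ticking
//		return err
//	}
//	for h := range heights { // e.g. fed from new block notifications
//		bt.Tick(h) // onEpoch fires every epochDuration ticks; repeated
//		           // calls with the same non-zero h are deduplicated
//	}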
-func (t *BlockTimer) Reset() error { - d, err := t.dur() - if err != nil { - return err - } - - t.mtx.Lock() - - t.resetWithBaseInterval(d) - - t.mtx.Unlock() - - return nil -} - -func (t *BlockTimer) resetWithBaseInterval(d uint32) { - t.baseDur = d - t.reset() -} - -func (t *BlockTimer) reset() { - delta := t.baseDur - if delta == 0 { - delta = 1 - } - - t.tgt = delta - t.cur = 0 -} - -// Tick ticks one block in the BlockTimer. -// -// Executes all callbacks which are awaiting execution at the new block. -func (t *BlockTimer) Tick(h uint32) { - t.mtx.Lock() - t.tick(h) - t.mtx.Unlock() -} - -func (t *BlockTimer) tick(h uint32) { - if h != 0 && t.last == h { - return - } - - t.last = h - t.cur++ - - if t.cur == t.tgt { - // it would be advisable to optimize such execution, for example: - // 1. push handler to worker pool t.wp.Submit(h); - // 2. call t.tickH(h) - t.h() - - if !t.once { - t.cur = 0 - t.reset() - } - } -} diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go deleted file mode 100644 index a144b3db6..000000000 --- a/pkg/morph/timer/block_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package timer_test - -import ( - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" - "github.com/stretchr/testify/require" -) - -func tickN(t *timer.BlockTimer, n uint32) { - for range n { - t.Tick(0) - } -} - -// This test emulates inner ring handling of a new epoch and a new block. -// "resetting" consists of ticking the current height as well and invoking `Reset`. -func TestIRBlockTimer_Reset(t *testing.T) { - var baseCounter [2]int - const blockDur = uint32(3) - - bt1 := timer.NewBlockTimer( - func() (uint32, error) { return blockDur, nil }, - func() { baseCounter[0]++ }) - bt2 := timer.NewBlockTimer( - func() (uint32, error) { return blockDur, nil }, - func() { baseCounter[1]++ }) - - require.NoError(t, bt1.Reset()) - require.NoError(t, bt2.Reset()) - - run := func(bt *timer.BlockTimer, direct bool) { - if direct { - bt.Tick(1) - require.NoError(t, bt.Reset()) - bt.Tick(1) - } else { - bt.Tick(1) - bt.Tick(1) - require.NoError(t, bt.Reset()) - } - bt.Tick(2) - bt.Tick(3) - } - - run(bt1, true) - run(bt2, false) - require.Equal(t, baseCounter[0], baseCounter[1]) -} - -func TestBlockTimer_ResetChangeDuration(t *testing.T) { - var dur uint32 = 2 - var err error - var counter int - - bt := timer.NewBlockTimer( - func() (uint32, error) { return dur, err }, - func() { counter++ }) - - require.NoError(t, bt.Reset()) - - tickN(bt, 2) - require.Equal(t, 1, counter) - - t.Run("return error", func(t *testing.T) { - dur = 5 - err = errors.New("my awesome error") - require.ErrorIs(t, bt.Reset(), err) - - tickN(bt, 2) - require.Equal(t, 2, counter) - }) - t.Run("change duration", func(t *testing.T) { - dur = 5 - err = nil - require.NoError(t, bt.Reset()) - - tickN(bt, 5) - require.Equal(t, 3, counter) - }) -} - -func TestBlockTimer(t *testing.T) { - const blockDur = uint32(10) - baseCallCounter := uint32(0) - - bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - intervalNum := uint32(7) - - tickN(bt, intervalNum*blockDur) - - require.Equal(t, intervalNum, uint32(baseCallCounter)) -} - -func TestNewOneTickTimer(t *testing.T) { - blockDur := uint32(1) - baseCallCounter := 0 - - bt := timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - require.NoError(t, bt.Reset()) - - tickN(bt, 10) - require.Equal(t, 1, baseCallCounter) // happens once 
no matter what - - t.Run("zero duration", func(t *testing.T) { - blockDur = uint32(0) - baseCallCounter = 0 - - bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - require.NoError(t, bt.Reset()) - - tickN(bt, 10) - require.Equal(t, 1, baseCallCounter) - }) -} - -func TestBlockTimer_TickSameHeight(t *testing.T) { - var baseCounter int - - blockDur := uint32(2) - bt := timer.NewBlockTimer( - func() (uint32, error) { return blockDur, nil }, - func() { baseCounter++ }) - require.NoError(t, bt.Reset()) - - check := func(t *testing.T, h uint32, base int) { - for range 2 * int(blockDur) { - bt.Tick(h) - require.Equal(t, base, baseCounter) - } - } - - check(t, 1, 0) - check(t, 2, 1) - check(t, 3, 1) - check(t, 4, 2) - - t.Run("works the same way after `Reset()`", func(t *testing.T) { - t.Run("same block duration", func(t *testing.T) { - require.NoError(t, bt.Reset()) - baseCounter = 0 - - check(t, 1, 0) - check(t, 2, 1) - check(t, 3, 1) - check(t, 4, 2) - }) - t.Run("different block duration", func(t *testing.T) { - blockDur = 3 - - require.NoError(t, bt.Reset()) - baseCounter = 0 - - check(t, 1, 0) - check(t, 2, 0) - check(t, 3, 1) - check(t, 4, 1) - check(t, 5, 1) - check(t, 6, 2) - }) - }) -} diff --git a/pkg/network/address.go b/pkg/network/address.go deleted file mode 100644 index 4643eef15..000000000 --- a/pkg/network/address.go +++ /dev/null @@ -1,125 +0,0 @@ -package network - -import ( - "errors" - "net" - "net/url" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" -) - -/* - HostAddr strings: "localhost:8080", ":8080", "192.168.0.1:8080" - MultiAddr strings: "/dns4/localhost/tcp/8080", "/ip4/192.168.0.1/tcp/8080" - IPAddr strings: "192.168.0.1:8080" - URIAddr strings: "127.0.0.1:8080" -*/ - -var errHostIsEmpty = errors.New("host is empty") - -// Address represents the FrostFS node -// network address. -type Address struct { - ma multiaddr.Multiaddr -} - -// String returns multiaddr string. -func (a Address) String() string { - return a.ma.String() -} - -// equal compares Address's. -func (a Address) equal(addr Address) bool { - return a.ma.Equal(addr.ma) -} - -// URIAddr returns Address as a URI. -// -// Panics if host address cannot be fetched from Address. -// -// See also FromString. -func (a Address) URIAddr() string { - _, host, err := manet.DialArgs(a.ma) - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - assert.NoError(err, "could not get host addr") - - if !a.IsTLSEnabled() { - return host - } - - return (&url.URL{ - Scheme: "grpcs", - Host: host, - }).String() -} - -// FromString restores Address from a string representation. -// -// Supports URIAddr, MultiAddr and HostAddr strings. -func (a *Address) FromString(s string) error { - var err error - - a.ma, err = multiaddr.NewMultiaddr(s) - if err != nil { - var ( - host string - hasTLS bool - ) - host, hasTLS, err = client.ParseURI(s) - if err != nil { - host = s - } - - s, err = multiaddrStringFromHostAddr(host) - if err == nil { - a.ma, err = multiaddr.NewMultiaddr(s) - if err == nil && hasTLS { - a.ma = a.ma.Encapsulate(tls) - } - } - } - - return err -} - -// multiaddrStringFromHostAddr converts "localhost:8080" to "/dns4/localhost/tcp/8080". 
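// Editor's note: illustrative inputs accepted by Address.FromString, which
// falls back to this helper for plain host:port strings; the expected
// conversions below follow from the tests in this package.
//
//	var a network.Address
//	_ = a.FromString("localhost:8080")           // -> /dns4/localhost/tcp/8080
//	_ = a.FromString(":8080")                    // -> /ip4/0.0.0.0/tcp/8080
//	_ = a.FromString("[2004:eb1::1]:8080")       // -> /ip6/2004:eb1::1/tcp/8080
//	_ = a.FromString("grpcs://example.com:7070") // -> /dns4/example.com/tcp/7070/tls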
-func multiaddrStringFromHostAddr(host string) (string, error) { - if len(host) == 0 { - return "", errHostIsEmpty - } - - endpoint, port, err := net.SplitHostPort(host) - if err != nil { - return "", err - } - - // Empty address in host `:8080` generates `/dns4//tcp/8080` multiaddr - // which is invalid. It could be `/tcp/8080` but this breaks - // `manet.DialArgs`. The solution is to manually parse it as 0.0.0.0 - if endpoint == "" { - return "/ip4/0.0.0.0/tcp/" + port, nil - } - - var ( - prefix = "/dns4" - addr = endpoint - ) - - if ip := net.ParseIP(endpoint); ip != nil { - addr = ip.String() - if ip.To4() == nil { - prefix = "/ip6" - } else { - prefix = "/ip4" - } - } - - const l4Protocol = "tcp" - - return strings.Join([]string{prefix, addr, l4Protocol, port}, "/"), nil -} diff --git a/pkg/network/address_test.go b/pkg/network/address_test.go deleted file mode 100644 index deac8a20f..000000000 --- a/pkg/network/address_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package network - -import ( - "testing" - - "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func TestAddressFromString(t *testing.T) { - t.Run("valid addresses", func(t *testing.T) { - testcases := []struct { - inp string - exp multiaddr.Multiaddr - }{ - {":8080", buildMultiaddr("/ip4/0.0.0.0/tcp/8080", t)}, - {"example.com:7070", buildMultiaddr("/dns4/example.com/tcp/7070", t)}, - {"213.44.87.1:32512", buildMultiaddr("/ip4/213.44.87.1/tcp/32512", t)}, - {"[2004:eb1::1]:8080", buildMultiaddr("/ip6/2004:eb1::1/tcp/8080", t)}, - {"grpc://example.com:7070", buildMultiaddr("/dns4/example.com/tcp/7070", t)}, - {"grpcs://example.com:7070", buildMultiaddr("/dns4/example.com/tcp/7070/"+tlsProtocolName, t)}, - } - - var addr Address - - for _, testcase := range testcases { - err := addr.FromString(testcase.inp) - require.NoError(t, err) - require.Equal(t, testcase.exp, addr.ma, testcase.inp) - } - }) - t.Run("invalid addresses", func(t *testing.T) { - testCases := []string{ - "wtf://example.com:123", // wrong scheme - "grpc://example.com", // missing port - } - - var addr Address - for _, tc := range testCases { - require.Error(t, addr.FromString(tc)) - } - }) -} - -func TestAddress_HostAddrString(t *testing.T) { - t.Run("valid addresses", func(t *testing.T) { - testcases := []struct { - ma multiaddr.Multiaddr - exp string - }{ - {buildMultiaddr("/dns4/frostfs.bigcorp.com/tcp/8080", t), "frostfs.bigcorp.com:8080"}, - {buildMultiaddr("/ip4/172.16.14.1/tcp/8080", t), "172.16.14.1:8080"}, - {buildMultiaddr("/ip4/192.168.0.1/tcp/8888/tls", t), "grpcs://192.168.0.1:8888"}, - } - - for _, testcase := range testcases { - addr := Address{testcase.ma} - - got := addr.URIAddr() - - require.Equal(t, testcase.exp, got) - } - }) - - t.Run("invalid addresses", func(t *testing.T) { - testcases := []multiaddr.Multiaddr{ - buildMultiaddr("/tcp/8080", t), - } - - for _, testcase := range testcases { - addr := Address{testcase} - require.Panics(t, func() { addr.URIAddr() }) - } - }) -} - -func buildMultiaddr(s string, t *testing.T) multiaddr.Multiaddr { - ma, err := multiaddr.NewMultiaddr(s) - require.NoError(t, err) - return ma -} diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go deleted file mode 100644 index 63ae0bfdb..000000000 --- a/pkg/network/cache/client.go +++ /dev/null @@ -1,91 +0,0 @@ -package cache - -import ( - "crypto/ecdsa" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" -) - -type ( - // ClientCache is a structure around frostfs-sdk-go/client to reuse - // already created clients. - ClientCache struct { - mu sync.RWMutex - clients map[string]*multiClient - opts ClientCacheOpts - } - - ClientCacheOpts struct { - DialTimeout time.Duration - StreamTimeout time.Duration - ReconnectTimeout time.Duration - Key *ecdsa.PrivateKey - ResponseCallback func(client.ResponseMetaInfo) error - AllowExternal bool - DialerSource *net.DialerSource - } -) - -// NewSDKClientCache creates instance of client cache. -// `opts` are used for new client creation. -func NewSDKClientCache(opts ClientCacheOpts) *ClientCache { - return &ClientCache{ - clients: make(map[string]*multiClient), - opts: opts, - } -} - -// Get function returns existing client or creates a new one. -func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error) { - netAddr := info.AddressGroup() - if c.opts.AllowExternal { - netAddr = append(netAddr, info.ExternalAddressGroup()...) - } - cacheKey := string(info.PublicKey()) - - c.mu.RLock() - if cli, ok := c.clients[cacheKey]; ok { - c.mu.RUnlock() - cli.updateGroup(netAddr) - return cli, nil - } - - c.mu.RUnlock() - // if client is not found in cache, then create a new one - c.mu.Lock() - defer c.mu.Unlock() - - // check once again if client is missing in cache, concurrent routine could - // create client while this routine was locked on `c.mu.Lock()`. - if cli, ok := c.clients[cacheKey]; ok { - // No need to update address group as the client has just been created. - return cli, nil - } - - newClientOpts := c.opts - newClientOpts.ResponseCallback = clientcore.AssertKeyResponseCallback(info.PublicKey()) - cli := newMultiClient(netAddr, newClientOpts) - - c.clients[cacheKey] = cli - - return cli, nil -} - -// CloseAll closes underlying connections of all cached clients. -// -// Ignores closing errors. -func (c *ClientCache) CloseAll() { - c.mu.RLock() - - { - for _, cl := range c.clients { - _ = cl.Close() - } - } - - c.mu.RUnlock() -} diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go deleted file mode 100644 index 54c1e18fb..000000000 --- a/pkg/network/cache/multi.go +++ /dev/null @@ -1,389 +0,0 @@ -package cache - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type singleClient struct { - sync.RWMutex - client clientcore.Client - lastAttempt time.Time -} - -type multiClient struct { - mtx sync.RWMutex - - clients map[string]*singleClient - - // addrMtx protects addr field. Should not be taken before the mtx. 
- addrMtx sync.RWMutex - addr network.AddressGroup - - opts ClientCacheOpts -} - -const defaultReconnectInterval = time.Second * 30 - -func newMultiClient(addr network.AddressGroup, opts ClientCacheOpts) *multiClient { - if opts.ReconnectTimeout <= 0 { - opts.ReconnectTimeout = defaultReconnectInterval - } - return &multiClient{ - clients: make(map[string]*singleClient), - addr: addr, - opts: opts, - } -} - -func (x *multiClient) createForAddress(ctx context.Context, addr network.Address) (clientcore.Client, error) { - var c client.Client - - prmInit := client.PrmInit{ - DisableFrostFSErrorResolution: true, - } - if x.opts.Key != nil { - prmInit.Key = *x.opts.Key - } - - grpcOpts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), - ), - grpc.WithChainStreamInterceptor( - qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - tagging.NewStreamClientInterceptor(), - ), - grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - } - - prmDial := client.PrmDial{ - Endpoint: addr.URIAddr(), - GRPCDialOptions: grpcOpts, - } - if x.opts.DialTimeout > 0 { - prmDial.DialTimeout = x.opts.DialTimeout - } - - if x.opts.StreamTimeout > 0 { - prmDial.StreamTimeout = x.opts.StreamTimeout - } - - if x.opts.ResponseCallback != nil { - prmInit.ResponseInfoCallback = x.opts.ResponseCallback - } - - c.Init(prmInit) - err := c.Dial(ctx, prmDial) - if err != nil { - return nil, fmt.Errorf("can't init SDK client: %w", err) - } - - return &c, nil -} - -// updateGroup replaces current multiClient addresses with a new group. -// Old addresses not present in group are removed. -func (x *multiClient) updateGroup(group network.AddressGroup) { - // Firstly, remove old clients. - cache := make([]string, 0, group.Len()) - group.IterateAddresses(func(a network.Address) bool { - cache = append(cache, a.String()) - return false - }) - - x.addrMtx.RLock() - oldGroup := x.addr - x.addrMtx.RUnlock() - if len(oldGroup) == len(cache) { - needUpdate := false - for i := range oldGroup { - if cache[i] != oldGroup[i].String() { - needUpdate = true - break - } - } - if !needUpdate { - return - } - } - - x.mtx.Lock() - defer x.mtx.Unlock() -loop: - for a := range x.clients { - for i := range cache { - if cache[i] == a { - continue loop - } - } - x.clients[a].invalidate() - delete(x.clients, a) - } - - // Then add new clients. 
- x.addrMtx.Lock() - x.addr = group - x.addrMtx.Unlock() -} - -var errRecentlyFailed = errors.New("client has recently failed, skipping") - -func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Client) error) error { - var firstErr error - - x.addrMtx.RLock() - group := x.addr - x.addrMtx.RUnlock() - - group.IterateAddresses(func(addr network.Address) bool { - select { - case <-ctx.Done(): - firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled) - return true - default: - } - - var err error - - c, err := x.client(ctx, addr) - if err == nil { - err = f(c) - } - - // non-status logic error that could be returned - // from the SDK client; should not be considered - // as a connection error - var siErr *objectSDK.SplitInfoError - var eiErr *objectSDK.ECInfoError - - if err != nil { - err = fmt.Errorf("client connection error at %v: %w", addr, err) - x.ReportError(err) - } - - success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr) - if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) { - firstErr = err - } - - return success - }) - - return firstErr -} - -func (x *multiClient) ReportError(err error) { - if errors.Is(err, errRecentlyFailed) { - return - } - - if status.Code(err) == codes.Canceled || errors.Is(err, context.Canceled) { - return - } - - // non-status logic error that could be returned - // from the SDK client; should not be considered - // as a connection error - var siErr *objectSDK.SplitInfoError - var eiErr *objectSDK.ECInfoError - if errors.As(err, &siErr) || errors.As(err, &eiErr) { - return - } - - // Dropping all clients here is not necessary, we do this - // because `multiClient` doesn't yet provide convenient interface - // for reporting individual errors for streaming operations. 
- x.mtx.RLock() - for _, sc := range x.clients { - sc.invalidate() - } - x.mtx.RUnlock() -} - -func (s *singleClient) invalidate() { - s.Lock() - if s.client != nil { - _ = s.client.Close() - } - s.client = nil - s.Unlock() -} - -func (x *multiClient) ObjectPutInit(ctx context.Context, p client.PrmObjectPutInit) (res client.ObjectWriter, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectPutInit(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPutSingle) (res *client.ResObjectPutSingle, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectPutSingle(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectDelete(ctx context.Context, p client.PrmObjectDelete) (res *client.ResObjectDelete, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectDelete(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectGetInit(ctx context.Context, p client.PrmObjectGet) (res *client.ObjectReader, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectGetInit(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectRangeInit(ctx context.Context, p client.PrmObjectRange) (res *client.ObjectRangeReader, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectRangeInit(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectHead(ctx context.Context, p client.PrmObjectHead) (res *client.ResObjectHead, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectHead(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectHash(ctx context.Context, p client.PrmObjectHash) (res *client.ResObjectHash, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectHash(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ObjectSearchInit(ctx context.Context, p client.PrmObjectSearch) (res *client.ObjectListReader, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.ObjectSearchInit(ctx, p) - return err - }) - - return -} - -func (x *multiClient) ExecRaw(func(client *rawclient.Client) error) error { - panic("multiClient.ExecRaw() must not be called") -} - -func (x *multiClient) Close() error { - x.mtx.RLock() - - { - for _, c := range x.clients { - if c.client != nil { - _ = c.client.Close() - } - } - } - - x.mtx.RUnlock() - - return nil -} - -func (x *multiClient) RawForAddress(ctx context.Context, addr network.Address, f func(client *rawclient.Client) error) error { - c, err := x.client(ctx, addr) - if err != nil { - return err - } - - err = c.ExecRaw(f) - if err != nil { - x.ReportError(err) - } - return err -} - -func (x *multiClient) client(ctx context.Context, addr network.Address) (clientcore.Client, error) { - strAddr := addr.String() - - x.mtx.RLock() - c, cached := x.clients[strAddr] - x.mtx.RUnlock() - - if cached { - c.RLock() - if c.client != nil { - cl := c.client - c.RUnlock() - return cl, nil - } - if x.opts.ReconnectTimeout != 0 && time.Since(c.lastAttempt) < x.opts.ReconnectTimeout { - c.RUnlock() - return nil, errRecentlyFailed - } - c.RUnlock() - } else { - var ok bool - x.mtx.Lock() - c, ok = x.clients[strAddr] - if !ok { - c = new(singleClient) - x.clients[strAddr] = c - } - x.mtx.Unlock() - } - - c.Lock() - defer 
c.Unlock() - - if c.client != nil { - return c.client, nil - } - - if x.opts.ReconnectTimeout != 0 && time.Since(c.lastAttempt) < x.opts.ReconnectTimeout { - return nil, errRecentlyFailed - } - - cl, err := x.createForAddress(ctx, addr) - if err != nil { - c.lastAttempt = time.Now() - return nil, err - } - - c.client = cl - return cl, nil -} diff --git a/pkg/network/group.go b/pkg/network/group.go deleted file mode 100644 index 0044fb2d4..000000000 --- a/pkg/network/group.go +++ /dev/null @@ -1,174 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "iter" - "slices" - "sort" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// AddressGroup represents list of network addresses of the node. -// -// List is sorted by priority of use. -type AddressGroup []Address - -// StringifyGroup returns concatenation of all addresses -// from the AddressGroup. -// -// The result is order-dependent. -func StringifyGroup(x AddressGroup) string { - var s string - - iterateAllAddresses(x, func(addr Address) { - s += addr.String() - }) - - return s -} - -// IterateAddresses iterates over all network addresses of the node. -// -// Breaks iterating on handler's true return. -// -// Handler should not be nil. -func (x AddressGroup) IterateAddresses(f func(Address) bool) { - for i := range x { - if f(x[i]) { - break - } - } -} - -// iterateAllAddresses iterates over all network addresses of g -// and passes each of them to f. -func iterateAllAddresses(g AddressGroup, f func(Address)) { - g.IterateAddresses(func(addr Address) bool { - f(addr) - return false - }) -} - -// Len returns number of addresses in AddressGroup. -func (x AddressGroup) Len() int { - return len(x) -} - -// Less returns true if i-th address in AddressGroup supports TLS -// and j-th one doesn't. -func (x AddressGroup) Less(i, j int) bool { - return x[i].IsTLSEnabled() && !x[j].IsTLSEnabled() -} - -// Swap swaps i-th and j-th addresses in AddressGroup. -func (x AddressGroup) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} - -// MultiAddressIterator is an interface of network address group. -type MultiAddressIterator interface { - // Addresses must return an iterator over network addresses. - Addresses() iter.Seq[string] - - // NumberOfAddresses must return number of addresses in group. - NumberOfAddresses() int -} - -// FromStringSlice forms AddressGroup from a string slice. -// -// Returns an error in the absence of addresses or if any of the addresses are incorrect. -func (x *AddressGroup) FromStringSlice(addr []string) error { - if len(addr) == 0 { - return errors.New("missing network addresses") - } - - res := make(AddressGroup, len(addr)) - for i := range addr { - var a Address - if err := a.FromString(addr[i]); err != nil { - return err // invalid format, ignore the whole field - } - res[i] = a - } - - *x = res - return nil -} - -// FromIterator forms AddressGroup from MultiAddressIterator structure. -// The result is sorted with sort.Sort. -// -// Returns an error in the absence of addresses or if any of the addresses are incorrect. 
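// Editor's note: a minimal sketch of a MultiAddressIterator fed into
// FromIterator; stringAddrs is a hypothetical helper (the test file below
// uses the same idea under the name testIterator).
//
//	type stringAddrs []string
//
//	func (s stringAddrs) Addresses() iter.Seq[string] { return slices.Values(s) }
//	func (s stringAddrs) NumberOfAddresses() int      { return len(s) }
//
//	var g network.AddressGroup
//	err := g.FromIterator(stringAddrs{
//		"/dns4/node1.frostfs/tcp/8080",
//		"/dns4/node2.frostfs/tcp/1234/tls",
//	})
//	// on success, g is sorted so that TLS-enabled addresses come first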
-func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
-	as := *x
-
-	addrNum := iter.NumberOfAddresses()
-	if addrNum <= 0 {
-		return errors.New("missing network addresses")
-	}
-
-	if cap(as) >= addrNum {
-		as = as[:0]
-	} else {
-		as = make(AddressGroup, 0, addrNum)
-	}
-
-	err := iterateParsedAddresses(iter, func(a Address) error {
-		as = append(as, a)
-		return nil
-	})
-
-	if err == nil {
-		sort.Sort(as)
-		*x = as
-	}
-
-	return err
-}
-
-// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
-// until the 1st parsing failure or f's error.
-func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
-	for s := range iter.Addresses() {
-		var a Address
-
-		err = a.FromString(s)
-		if err != nil {
-			return fmt.Errorf("could not parse address from string: %w", err)
-		}
-
-		err = f(a)
-		if err != nil {
-			return err
-		}
-	}
-
-	return
-}
-
-// WriteToNodeInfo writes AddressGroup to netmap.NodeInfo structure.
-func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
-	num := g.Len()
-	addrs := make([]string, 0, num)
-
-	iterateAllAddresses(g, func(addr Address) {
-		addrs = append(addrs, addr.String())
-	})
-
-	ni.SetNetworkEndpoints(addrs...)
-}
-
-// Intersects checks if two AddressGroup have
-// at least one common address.
-func (x AddressGroup) Intersects(x2 AddressGroup) bool {
-	for i := range x {
-		if slices.ContainsFunc(x2, x[i].equal) {
-			return true
-		}
-	}
-
-	return false
-}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
deleted file mode 100644
index d08264533..000000000
--- a/pkg/network/group_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package network
-
-import (
-	"iter"
-	"slices"
-	"sort"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestAddressGroup_FromStringSlice(t *testing.T) {
-	addrs := []string{
-		"/dns4/node1.frostfs/tcp/8080",
-		"/dns4/node2.frostfs/tcp/1234/tls",
-	}
-	expected := make(AddressGroup, len(addrs))
-	for i := range addrs {
-		expected[i] = Address{buildMultiaddr(addrs[i], t)}
-	}
-
-	var ag AddressGroup
-	t.Run("empty", func(t *testing.T) {
-		require.Error(t, ag.FromStringSlice(nil))
-	})
-
-	require.NoError(t, ag.FromStringSlice(addrs))
-	require.Equal(t, expected, ag)
-
-	t.Run("error is returned, group is unchanged", func(t *testing.T) {
-		require.Error(t, ag.FromStringSlice([]string{"invalid"}))
-		require.Equal(t, expected, ag)
-	})
-}
-
-func TestAddressGroup_FromIterator(t *testing.T) {
-	addrs := testIterator{
-		"/dns4/node1.frostfs/tcp/8080",
-		"/dns4/node2.frostfs/tcp/1234/tls",
-	}
-	expected := make(AddressGroup, len(addrs))
-	for i := range addrs {
-		expected[i] = Address{buildMultiaddr(addrs[i], t)}
-	}
-	sort.Sort(expected)
-
-	var ag AddressGroup
-	t.Run("empty", func(t *testing.T) {
-		require.Error(t, ag.FromIterator(testIterator{}))
-	})
-
-	require.NoError(t, ag.FromIterator(addrs))
-	require.Equal(t, expected, ag)
-
-	t.Run("error is returned, group is unchanged", func(t *testing.T) {
-		require.Error(t, ag.FromIterator(testIterator{"invalid"}))
-		require.Equal(t, expected, ag)
-	})
-}
-
-type testIterator []string
-
-func (t testIterator) Addresses() iter.Seq[string] {
-	return slices.Values(t)
-}
-
-func (t testIterator) NumberOfAddresses() int {
-	return len(t)
-}
diff --git a/pkg/network/tls.go b/pkg/network/tls.go
deleted file mode 100644
index 544dc8240..000000000
--- a/pkg/network/tls.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package network
-
-import (
-	
"github.com/multiformats/go-multiaddr" -) - -const ( - tlsProtocolName = "tls" -) - -// tls var is used for (un)wrapping other multiaddrs around TLS multiaddr. -var tls, _ = multiaddr.NewMultiaddr("/" + tlsProtocolName) - -// IsTLSEnabled searches for wrapped TLS protocol in multiaddr. -func (a Address) IsTLSEnabled() bool { - _, err := a.ma.ValueForProtocol(multiaddr.P_TLS) - return err == nil -} diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go deleted file mode 100644 index 14729f4c2..000000000 --- a/pkg/network/tls_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package network - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAddress_TLSEnabled(t *testing.T) { - testCases := [...]struct { - input string - wantTLS bool - }{ - {"/dns4/localhost/tcp/8080", false}, - {"/dns4/localhost/tcp/8080/tls", true}, - {"/tls/dns4/localhost/tcp/8080", true}, - {"grpc://localhost:8080", false}, - {"grpcs://localhost:8080", true}, - } - - var addr Address - - for _, test := range testCases { - err := addr.FromString(test.input) - require.NoError(t, err) - - require.Equal(t, test.wantTLS, addr.IsTLSEnabled(), test.input) - } -} - -func BenchmarkAddressTLSEnabled(b *testing.B) { - var addr Address - err := addr.FromString("/dns4/localhost/tcp/8080/tls") - require.NoError(b, err) - - b.ResetTimer() - b.ReportAllocs() - - var enabled bool - for range b.N { - enabled = addr.IsTLSEnabled() - } - require.True(b, enabled) -} diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go deleted file mode 100644 index 78129bfbe..000000000 --- a/pkg/network/transport/accounting/grpc/service.go +++ /dev/null @@ -1,37 +0,0 @@ -package accounting - -import ( - "context" - - accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" -) - -// Server wraps FrostFS API Accounting service and -// provides gRPC Accounting service server interface. -type Server struct { - srv accountingsvc.Server -} - -// New creates, initializes and returns Server instance. -func New(c accountingsvc.Server) *Server { - return &Server{ - srv: c, - } -} - -// Balance converts gRPC BalanceRequest message and passes it to internal Accounting service. 
-func (s *Server) Balance(ctx context.Context, req *accountingGRPC.BalanceRequest) (*accountingGRPC.BalanceResponse, error) { - balReq := new(accounting.BalanceRequest) - if err := balReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Balance(ctx, balReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*accountingGRPC.BalanceResponse), nil -} diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go deleted file mode 100644 index 850d38a65..000000000 --- a/pkg/network/transport/apemanager/grpc/service.go +++ /dev/null @@ -1,63 +0,0 @@ -package apemanager - -import ( - "context" - - apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" -) - -type Server struct { - srv apemanager_svc.Server -} - -var _ apemanager_grpc.APEManagerServiceServer = (*Server)(nil) - -func New(c apemanager_svc.Server) *Server { - return &Server{ - srv: c, - } -} - -func (s *Server) AddChain(ctx context.Context, req *apemanager_grpc.AddChainRequest) (*apemanager_grpc.AddChainResponse, error) { - v2req := new(apemanager_v2.AddChainRequest) - if err := v2req.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.AddChain(ctx, v2req) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*apemanager_grpc.AddChainResponse), nil -} - -func (s *Server) RemoveChain(ctx context.Context, req *apemanager_grpc.RemoveChainRequest) (*apemanager_grpc.RemoveChainResponse, error) { - v2req := new(apemanager_v2.RemoveChainRequest) - if err := v2req.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.RemoveChain(ctx, v2req) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*apemanager_grpc.RemoveChainResponse), nil -} - -func (s *Server) ListChains(ctx context.Context, req *apemanager_grpc.ListChainsRequest) (*apemanager_grpc.ListChainsResponse, error) { - v2req := new(apemanager_v2.ListChainsRequest) - if err := v2req.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.ListChains(ctx, v2req) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*apemanager_grpc.ListChainsResponse), nil -} diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go deleted file mode 100644 index 8cbf8d9c3..000000000 --- a/pkg/network/transport/container/grpc/service.go +++ /dev/null @@ -1,105 +0,0 @@ -package container - -import ( - "context" - - containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" -) - -// Server wraps FrostFS API Container service and -// provides gRPC Container service server interface. -type Server struct { - srv containersvc.Server -} - -// New creates, initializes and returns Server instance. -func New(c containersvc.Server) *Server { - return &Server{ - srv: c, - } -} - -// Put converts gRPC PutRequest message and passes it to internal Container service. 
-func (s *Server) Put(ctx context.Context, req *containerGRPC.PutRequest) (*containerGRPC.PutResponse, error) { - putReq := new(container.PutRequest) - if err := putReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Put(ctx, putReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.PutResponse), nil -} - -// Delete converts gRPC DeleteRequest message and passes it to internal Container service. -func (s *Server) Delete(ctx context.Context, req *containerGRPC.DeleteRequest) (*containerGRPC.DeleteResponse, error) { - delReq := new(container.DeleteRequest) - if err := delReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Delete(ctx, delReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.DeleteResponse), nil -} - -// Get converts gRPC GetRequest message and passes it to internal Container service. -func (s *Server) Get(ctx context.Context, req *containerGRPC.GetRequest) (*containerGRPC.GetResponse, error) { - getReq := new(container.GetRequest) - if err := getReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Get(ctx, getReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.GetResponse), nil -} - -// List converts gRPC ListRequest message and passes it to internal Container service. -func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*containerGRPC.ListResponse, error) { - listReq := new(container.ListRequest) - if err := listReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.List(ctx, listReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil -} - -type containerStreamerV2 struct { - containerGRPC.ContainerService_ListStreamServer -} - -func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error { - return s.ContainerService_ListStreamServer.Send( - resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse), - ) -} - -// ListStream converts gRPC ListRequest message and server-side stream and overtakes its data -// to gRPC stream. -func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error { - listReq := new(container.ListStreamRequest) - if err := listReq.FromGRPCMessage(req); err != nil { - return err - } - - return s.srv.ListStream(listReq, &containerStreamerV2{ - ContainerService_ListStreamServer: gStream, - }) -} diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go deleted file mode 100644 index 4bc3a42f8..000000000 --- a/pkg/network/transport/netmap/grpc/service.go +++ /dev/null @@ -1,70 +0,0 @@ -package grpc - -import ( - "context" - - netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" - netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc" -) - -// Server wraps FrostFS API Netmap service and -// provides gRPC Netmap service server interface. -type Server struct { - srv netmapsvc.Server -} - -// New creates, initializes and returns Server instance. -func New(c netmapsvc.Server) *Server { - return &Server{ - srv: c, - } -} - -// LocalNodeInfo converts gRPC request message and passes it to internal netmap service. 
-func (s *Server) LocalNodeInfo( - ctx context.Context, - req *netmapGRPC.LocalNodeInfoRequest, -) (*netmapGRPC.LocalNodeInfoResponse, error) { - nodeInfoReq := new(netmap.LocalNodeInfoRequest) - if err := nodeInfoReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.LocalNodeInfo(ctx, nodeInfoReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*netmapGRPC.LocalNodeInfoResponse), nil -} - -// NetworkInfo converts gRPC request message and passes it to internal netmap service. -func (s *Server) NetworkInfo(ctx context.Context, req *netmapGRPC.NetworkInfoRequest) (*netmapGRPC.NetworkInfoResponse, error) { - netInfoReq := new(netmap.NetworkInfoRequest) - if err := netInfoReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.NetworkInfo(ctx, netInfoReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*netmapGRPC.NetworkInfoResponse), nil -} - -// NetmapSnapshot converts gRPC request message and passes it to internal netmap service. -func (s *Server) NetmapSnapshot(ctx context.Context, req *netmapGRPC.NetmapSnapshotRequest) (*netmapGRPC.NetmapSnapshotResponse, error) { - snapshotReq := new(netmap.SnapshotRequest) - if err := snapshotReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Snapshot(ctx, snapshotReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*netmapGRPC.NetmapSnapshotResponse), nil -} diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go deleted file mode 100644 index 655b1f9fb..000000000 --- a/pkg/network/transport/object/grpc/get.go +++ /dev/null @@ -1,32 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" -) - -type getStreamerV2 struct { - objectGRPC.ObjectService_GetServer -} - -func (s *getStreamerV2) Send(resp *object.GetResponse) error { - return s.ObjectService_GetServer.Send( - resp.ToGRPCMessage().(*objectGRPC.GetResponse), - ) -} - -// Get converts gRPC GetRequest message and server-side stream and overtakes its data -// to gRPC stream. -func (s *Server) Get(req *objectGRPC.GetRequest, gStream objectGRPC.ObjectService_GetServer) error { - getReq := new(object.GetRequest) - if err := getReq.FromGRPCMessage(req); err != nil { - return err - } - - return s.srv.Get( - getReq, - &getStreamerV2{ - ObjectService_GetServer: gStream, - }, - ) -} diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go deleted file mode 100644 index 7d7ce0e4c..000000000 --- a/pkg/network/transport/object/grpc/range.go +++ /dev/null @@ -1,32 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" -) - -type getRangeStreamerV2 struct { - objectGRPC.ObjectService_GetRangeServer -} - -func (s *getRangeStreamerV2) Send(resp *object.GetRangeResponse) error { - return s.ObjectService_GetRangeServer.Send( - resp.ToGRPCMessage().(*objectGRPC.GetRangeResponse), - ) -} - -// GetRange converts gRPC GetRangeRequest message and server-side stream and overtakes its data -// to gRPC stream. 
-func (s *Server) GetRange(req *objectGRPC.GetRangeRequest, gStream objectGRPC.ObjectService_GetRangeServer) error { - getRngReq := new(object.GetRangeRequest) - if err := getRngReq.FromGRPCMessage(req); err != nil { - return err - } - - return s.srv.GetRange( - getRngReq, - &getRangeStreamerV2{ - ObjectService_GetRangeServer: gStream, - }, - ) -} diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go deleted file mode 100644 index 8432707f7..000000000 --- a/pkg/network/transport/object/grpc/search.go +++ /dev/null @@ -1,32 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" -) - -type searchStreamerV2 struct { - objectGRPC.ObjectService_SearchServer -} - -func (s *searchStreamerV2) Send(resp *object.SearchResponse) error { - return s.ObjectService_SearchServer.Send( - resp.ToGRPCMessage().(*objectGRPC.SearchResponse), - ) -} - -// Search converts gRPC SearchRequest message and server-side stream and overtakes its data -// to gRPC stream. -func (s *Server) Search(req *objectGRPC.SearchRequest, gStream objectGRPC.ObjectService_SearchServer) error { - searchReq := new(object.SearchRequest) - if err := searchReq.FromGRPCMessage(req); err != nil { - return err - } - - return s.srv.Search( - searchReq, - &searchStreamerV2{ - ObjectService_SearchServer: gStream, - }, - ) -} diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go deleted file mode 100644 index 15dacd553..000000000 --- a/pkg/network/transport/object/grpc/service.go +++ /dev/null @@ -1,168 +0,0 @@ -package object - -import ( - "context" - "errors" - "io" - - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" -) - -// Server wraps FrostFS API Object service and -// provides gRPC Object service server interface. -type Server struct { - srv objectSvc.ServiceServer -} - -// New creates, initializes and returns Server instance. -func New(c objectSvc.ServiceServer) *Server { - return &Server{ - srv: c, - } -} - -// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream. -func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { - stream, err := s.srv.Patch(gStream.Context()) - if err != nil { - return err - } - - for { - req, err := gStream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - resp, err := stream.CloseAndRecv(gStream.Context()) - if err != nil { - return err - } - - return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse)) - } - - return err - } - - patchReq := new(object.PatchRequest) - if err := patchReq.FromGRPCMessage(req); err != nil { - return err - } - - if err := stream.Send(gStream.Context(), patchReq); err != nil { - if errors.Is(err, util.ErrAbortStream) { - resp, err := stream.CloseAndRecv(gStream.Context()) - if err != nil { - return err - } - - return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse)) - } - - return err - } - } -} - -// Put opens internal Object service Put stream and overtakes data from gRPC stream to it. 
-func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error { - stream, err := s.srv.Put(gStream.Context()) - if err != nil { - return err - } - - for { - req, err := gStream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - resp, err := stream.CloseAndRecv(gStream.Context()) - if err != nil { - return err - } - - return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PutResponse)) - } - - return err - } - - putReq := new(object.PutRequest) - if err := putReq.FromGRPCMessage(req); err != nil { - return err - } - - if err := stream.Send(gStream.Context(), putReq); err != nil { - if errors.Is(err, util.ErrAbortStream) { - resp, err := stream.CloseAndRecv(gStream.Context()) - if err != nil { - return err - } - - return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PutResponse)) - } - - return err - } - } -} - -// Delete converts gRPC DeleteRequest message and passes it to internal Object service. -func (s *Server) Delete(ctx context.Context, req *objectGRPC.DeleteRequest) (*objectGRPC.DeleteResponse, error) { - delReq := new(object.DeleteRequest) - if err := delReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Delete(ctx, delReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*objectGRPC.DeleteResponse), nil -} - -// Head converts gRPC HeadRequest message and passes it to internal Object service. -func (s *Server) Head(ctx context.Context, req *objectGRPC.HeadRequest) (*objectGRPC.HeadResponse, error) { - searchReq := new(object.HeadRequest) - if err := searchReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.Head(ctx, searchReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*objectGRPC.HeadResponse), nil -} - -// GetRangeHash converts gRPC GetRangeHashRequest message and passes it to internal Object service. -func (s *Server) GetRangeHash(ctx context.Context, req *objectGRPC.GetRangeHashRequest) (*objectGRPC.GetRangeHashResponse, error) { - hashRngReq := new(object.GetRangeHashRequest) - if err := hashRngReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.GetRangeHash(ctx, hashRngReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*objectGRPC.GetRangeHashResponse), nil -} - -func (s *Server) PutSingle(ctx context.Context, req *objectGRPC.PutSingleRequest) (*objectGRPC.PutSingleResponse, error) { - putSingleReq := &object.PutSingleRequest{} - if err := putSingleReq.FromGRPCMessage(req); err != nil { - return nil, err - } - - resp, err := s.srv.PutSingle(ctx, putSingleReq) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*objectGRPC.PutSingleResponse), nil -} diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go deleted file mode 100644 index 6fce397f3..000000000 --- a/pkg/network/transport/session/grpc/service.go +++ /dev/null @@ -1,37 +0,0 @@ -package session - -import ( - "context" - - sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" -) - -// Server wraps FrostFS API Session service and -// provides gRPC Session service server interface. -type Server struct { - srv sessionsvc.Server -} - -// New creates, initializes and returns Server instance. 
-func New(c sessionsvc.Server) *Server {
-	return &Server{
-		srv: c,
-	}
-}
-
-// Create converts gRPC CreateRequest message and passes it to internal Session service.
-func (s *Server) Create(ctx context.Context, req *sessionGRPC.CreateRequest) (*sessionGRPC.CreateResponse, error) {
-	createReq := new(session.CreateRequest)
-	if err := createReq.FromGRPCMessage(req); err != nil {
-		return nil, err
-	}
-
-	resp, err := s.srv.Create(ctx, createReq)
-	if err != nil {
-		return nil, err
-	}
-
-	return resp.ToGRPCMessage().(*sessionGRPC.CreateResponse), nil
-}
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
deleted file mode 100644
index b5157f28f..000000000
--- a/pkg/network/validation.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package network
-
-import (
-	"errors"
-	"iter"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-const (
-	// maxProtocolsAmount is the maximal amount of protocols
-	// in a multiaddress after parsing with Address.FromString.
-	maxProtocolsAmount = 3
-
-	// minProtocolsAmount is the minimal amount of protocols
-	// in a multiaddress after parsing with Address.FromString:
-	// host(ip) and port.
-	minProtocolsAmount = 2
-
-	// network protocols.
-	dns, ip4, ip6 = "dns4", "ip4", "ip6"
-
-	// transport protocols.
-	tcp = "tcp"
-)
-
-var (
-	errIncorrectProtocolAmount         = errors.New("number of protocols in multiaddress is incorrect")
-	errUnsupportedNetworkProtocol      = errors.New("unsupported network protocol in multiaddress")
-	errUnsupportedTransportProtocol    = errors.New("unsupported transport protocol in multiaddress")
-	errUnsupportedPresentationProtocol = errors.New("unsupported presentation protocol in multiaddress")
-)
-
-// NodeEndpointsIterator is a wrapper over netmap.NodeInfo which implements
-// MultiAddressIterator.
-type NodeEndpointsIterator netmap.NodeInfo
-
-func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
-	return (netmap.NodeInfo)(x).NetworkEndpoints()
-}
-
-func (x NodeEndpointsIterator) NumberOfAddresses() int {
-	return (netmap.NodeInfo)(x).NumberOfNetworkEndpoints()
-}
-
-// VerifyMultiAddress validates the multiaddress of n.
-//
-// If n's address contains more than 3 protocols
-// or fewer than 2 protocols, an error is returned.
-//
-// If the protocol order in n's address is incorrect,
-// an error is returned.
-//
-// Correct composition (and order from low to high level)
-// of protocols:
-// 1. dns4/ip4/ip6
-// 2. tcp
-// 3. 
tls(optional, may be absent) -func VerifyMultiAddress(ni netmap.NodeInfo) error { - return iterateParsedAddresses(NodeEndpointsIterator(ni), checkProtocols) -} - -func checkProtocols(a Address) error { - pp := a.ma.Protocols() - parsedProtocolsAmount := len(pp) - - if parsedProtocolsAmount > maxProtocolsAmount || parsedProtocolsAmount < minProtocolsAmount { - return errIncorrectProtocolAmount - } - - switch pp[0].Name { - case dns, ip4, ip6: - default: - return errUnsupportedNetworkProtocol - } - - if pp[1].Name != tcp { - return errUnsupportedTransportProtocol - } - - if parsedProtocolsAmount != minProtocolsAmount { - if pp[2].Name != tlsProtocolName { - return errUnsupportedPresentationProtocol - } - } - - return nil -} diff --git a/pkg/network/validation_test.go b/pkg/network/validation_test.go deleted file mode 100644 index 7d3cb9577..000000000 --- a/pkg/network/validation_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package network - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/stretchr/testify/require" -) - -type testCase struct { - input string - err error -} - -func TestVerifyMultiAddress_Order(t *testing.T) { - testCases := []testCase{ - { - input: "/ip4/1.2.3.4/tcp/80", - err: nil, - }, - { - input: "/ip6/1.2.3.4/tcp/80", - err: nil, - }, - { - input: "/dns4/1.2.3.4/tcp/80", - err: nil, - }, - { - input: "/dns4/1.2.3.4/tcp/80/tls", - err: nil, - }, - { - input: "/tls/dns4/1.2.3.4/tcp/80", - err: errUnsupportedNetworkProtocol, - }, - { - input: "/dns4/1.2.3.4/tls/tcp/80", - err: errUnsupportedTransportProtocol, - }, - { - input: "/dns4/1.2.3.4/tcp/80/wss", - err: errUnsupportedPresentationProtocol, - }, - } - - for _, test := range testCases { - ni := constructNodeInfo(test.input) - - if test.err != nil { - require.EqualError(t, test.err, VerifyMultiAddress(ni).Error()) - } else { - require.NoError(t, VerifyMultiAddress(ni)) - } - } -} - -func constructNodeInfo(address string) (ni netmap.NodeInfo) { - ni.SetNetworkEndpoints(address) - return ni -} diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go deleted file mode 100644 index 93e44c52b..000000000 --- a/pkg/services/accounting/executor.go +++ /dev/null @@ -1,39 +0,0 @@ -package accounting - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" -) - -type ServiceExecutor interface { - Balance(context.Context, *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) -} - -type executorSvc struct { - exec ServiceExecutor - respSvc *response.Service -} - -// NewExecutionService wraps ServiceExecutor and returns Accounting Service interface. 
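// Editor's note: a sketch of how the accounting layers compose end to end;
// the import aliases (morphacc for the morph executor package, accountinggrpc
// for the transport package) and the balanceClient, respSvc and key values
// are assumptions for illustration.
//
//	exec := morphacc.NewExecutor(balanceClient)          // morph-backed ServiceExecutor
//	svc := accounting.NewExecutionService(exec, respSvc) // executes and fills response meta
//	signed := accounting.NewSignService(key, svc)        // signs every response
//	srv := accountinggrpc.New(signed)                    // gRPC transport wrapper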
-func NewExecutionService(exec ServiceExecutor, respSvc *response.Service) Server { - return &executorSvc{ - exec: exec, - respSvc: respSvc, - } -} - -func (s *executorSvc) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) { - respBody, err := s.exec.Balance(ctx, req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute Balance request: %w", err) - } - - resp := new(accounting.BalanceResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go deleted file mode 100644 index 6c2df8428..000000000 --- a/pkg/services/accounting/morph/executor.go +++ /dev/null @@ -1,55 +0,0 @@ -package accounting - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type morphExecutor struct { - client *balance.Client -} - -func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { - return &morphExecutor{ - client: client, - } -} - -func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { - idV2 := body.GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing account") - } - - var id user.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid account: %w", err) - } - - amount, err := s.client.BalanceOf(ctx, id) - if err != nil { - return nil, err - } - - balancePrecision, err := s.client.Decimals(ctx) - if err != nil { - return nil, err - } - - dec := new(accounting.Decimal) - dec.SetValue(amount.Int64()) - dec.SetPrecision(balancePrecision) - - res := new(accounting.BalanceResponseBody) - res.SetBalance(dec) - - return res, nil -} diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go deleted file mode 100644 index a280416fb..000000000 --- a/pkg/services/accounting/server.go +++ /dev/null @@ -1,12 +0,0 @@ -package accounting - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" -) - -// Server is an interface of the FrostFS API Accounting service server. 
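// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// The morph executor above reports a balance as a fixed-point pair: a raw
// int64 value from BalanceOf plus a precision (number of decimal places)
// from Decimals. A minimal, dependency-free illustration of reading such a
// pair into a human-readable amount:
package main

import (
	"fmt"
	"math/big"
)

// formatFixedPoint renders value with the given precision,
// e.g. value=12345, precision=8 -> "0.00012345".
func formatFixedPoint(value int64, precision uint32) string {
	f := new(big.Float).SetInt64(value)
	pow := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(precision)), nil)
	f.Quo(f, new(big.Float).SetInt(pow))
	return f.Text('f', int(precision))
}

func main() {
	fmt.Println(formatFixedPoint(12345, 8)) // 0.00012345
}
// ---------------------------------------------------------------------------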
-type Server interface { - Balance(context.Context, *accounting.BalanceRequest) (*accounting.BalanceResponse, error) -} diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go deleted file mode 100644 index d8feb76bd..000000000 --- a/pkg/services/accounting/sign.go +++ /dev/null @@ -1,27 +0,0 @@ -package accounting - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" -) - -type signService struct { - sigSvc *util.SignService - - svc Server -} - -func NewSignService(key *ecdsa.PrivateKey, svc Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *signService) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) { - resp, err := util.EnsureNonNilResponse(s.svc.Balance(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go deleted file mode 100644 index 61fb025b8..000000000 --- a/pkg/services/apemanager/audit.go +++ /dev/null @@ -1,75 +0,0 @@ -package apemanager - -import ( - "context" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" - ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" -) - -var _ Server = (*auditService)(nil) - -type auditService struct { - next Server - log *logger.Logger - enabled *atomic.Bool -} - -func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server { - return &auditService{ - next: next, - log: log, - enabled: enabled, - } -} - -// AddChain implements Server. -func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainRequest) (*apemanager.AddChainResponse, error) { - res, err := a.next.AddChain(ctx, req) - if !a.enabled.Load() { - return res, err - } - - audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req, - audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), - req.GetBody().GetTarget().GetName(), - res.GetBody().GetChainID()), - err == nil) - - return res, err -} - -// ListChains implements Server. -func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChainsRequest) (*apemanager.ListChainsResponse, error) { - res, err := a.next.ListChains(ctx, req) - if !a.enabled.Load() { - return res, err - } - - audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req, - audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), - req.GetBody().GetTarget().GetName(), - nil), - err == nil) - - return res, err -} - -// RemoveChain implements Server. 
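// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// auditService above consults an *atomic.Bool on every call, so audit
// logging can be switched on and off at runtime without recreating the
// service. A minimal, self-contained illustration of that toggle pattern:
package main

import (
	"fmt"
	"sync/atomic"
)

type auditedThing struct {
	enabled *atomic.Bool
}

func (a *auditedThing) do() {
	if a.enabled.Load() {
		fmt.Println("audit: operation logged")
	}
}

func main() {
	enabled := new(atomic.Bool)
	thing := &auditedThing{enabled: enabled}

	thing.do()          // audit disabled: nothing is logged
	enabled.Store(true) // e.g. flipped by a configuration reload
	thing.do()          // audit: operation logged
}
// ---------------------------------------------------------------------------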
-func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveChainRequest) (*apemanager.RemoveChainResponse, error) { - res, err := a.next.RemoveChain(ctx, req) - if !a.enabled.Load() { - return res, err - } - - audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req, - audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), - req.GetBody().GetTarget().GetName(), - req.GetBody().GetChainID()), - err == nil) - - return res, err -} diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go deleted file mode 100644 index 1d485321c..000000000 --- a/pkg/services/apemanager/errors/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package errors - -import ( - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -func ErrAPEManagerAccessDenied(reason string) error { - err := new(apistatus.APEManagerAccessDenied) - err.WriteReason(reason) - return err -} - -func ErrAPEManagerInvalidArgument(msg string) error { - err := new(apistatus.InvalidArgument) - err.SetMessage(msg) - return err -} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go deleted file mode 100644 index fc08fe569..000000000 --- a/pkg/services/apemanager/executor.go +++ /dev/null @@ -1,261 +0,0 @@ -package apemanager - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "errors" - "fmt" - - ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape" - apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "github.com/mr-tron/base58/base58" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" -) - -var errEmptyBodySignature = errors.New("malformed request: empty body signature") - -type cfg struct { - log *logger.Logger -} - -type Service struct { - cfg - - waiter Waiter - - cnrSrc containercore.Source - - contractStorage ape_contract.ProxyAdaptedContractStorage -} - -type Option func(*cfg) - -type Waiter interface { - WaitTxHalt(context.Context, uint32, util.Uint256) error -} - -func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service { - s := &Service{ - cnrSrc: cnrSrc, - - contractStorage: contractStorage, - - waiter: waiter, - } - - for i := range opts { - opts[i](&s.cfg) - } - - if s.log == nil { - s.log = logger.NewLoggerWrapper(zap.NewNop()) - } - - return s -} - -func WithLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.log = log - } -} - -var _ Server = (*Service)(nil) - -// validateContainerTargetRequest validates request for the container target. -// It checks if request actor is the owner of the container, otherwise it denies the request. 
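// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// Service above uses the functional-options pattern: New applies each Option
// to an internal cfg and installs a no-op zap logger when no WithLogger
// option is given. A hypothetical construction (the dependency variables
// are placeholders):
//
//	svc := apemanager.New(cnrSrc, contractStorage, waiter,
//		apemanager.WithLogger(log))
//
// Omitting WithLogger is safe: New falls back to
// logger.NewLoggerWrapper(zap.NewNop()).
// ---------------------------------------------------------------------------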
-func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { - var cidSDK cidSDK.ID - if err := cidSDK.DecodeString(cid); err != nil { - return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) - } - isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) - if err != nil { - return fmt.Errorf("failed to check owner: %w", err) - } - if !isOwner { - return apemanager_errors.ErrAPEManagerAccessDenied("actor must be container owner") - } - return nil -} - -func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { - pub, err := getSignaturePublicKey(req.GetVerificationHeader()) - if err != nil { - return nil, err - } - - chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) - if err != nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) - } - if len(chain.ID) == 0 { - const randomIDLength = 10 - randID, err := base58Str(randomIDLength) - if err != nil { - return nil, fmt.Errorf("randomize chain ID error: %w", err) - } - chain.ID = apechain.ID(randID) - } - - var target policy_engine.Target - - switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { - case apeV2.TargetTypeContainer: - reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { - return nil, err - } - target = policy_engine.ContainerTarget(reqCID) - default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) - } - - txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) - if err != nil { - return nil, err - } - if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { - return nil, err - } - - body := new(apemanagerV2.AddChainResponseBody) - body.SetChainID(chain.ID) - - resp := new(apemanagerV2.AddChainResponse) - resp.SetBody(body) - - return resp, nil -} - -func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { - pub, err := getSignaturePublicKey(req.GetVerificationHeader()) - if err != nil { - return nil, err - } - - var target policy_engine.Target - - switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { - case apeV2.TargetTypeContainer: - reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { - return nil, err - } - target = policy_engine.ContainerTarget(reqCID) - default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) - } - - txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) - if err != nil { - return nil, err - } - if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { - return nil, err - } - - body := new(apemanagerV2.RemoveChainResponseBody) - - resp := new(apemanagerV2.RemoveChainResponse) - resp.SetBody(body) - - return resp, nil -} - -func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { - pub, err := getSignaturePublicKey(req.GetVerificationHeader()) - if err != nil { - return nil, err - } - - var target policy_engine.Target - - switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { - case 
apeV2.TargetTypeContainer: - reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { - return nil, err - } - target = policy_engine.ContainerTarget(reqCID) - default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) - } - - chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) - if err != nil { - return nil, err - } - - res := make([]*apeV2.Chain, 0, len(chs)) - for _, ch := range chs { - v2chraw := new(apeV2.ChainRaw) - v2chraw.SetRaw(ch.Bytes()) - - v2ch := new(apeV2.Chain) - v2ch.SetKind(v2chraw) - - res = append(res, v2ch) - } - - body := new(apemanagerV2.ListChainsResponseBody) - body.SetChains(res) - - resp := new(apemanagerV2.ListChainsResponse) - resp.SetBody(body) - - return resp, nil -} - -func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicKey, error) { - for vh.GetOrigin() != nil { - vh = vh.GetOrigin() - } - sig := vh.GetBodySignature() - if sig == nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) - } - key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) - if err != nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) - } - - return key, nil -} - -func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { - var actor user.ID - user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) - actorOwnerID := new(refs.OwnerID) - actor.WriteToV2(actorOwnerID) - - cnr, err := s.cnrSrc.Get(ctx, cid) - if err != nil { - return false, fmt.Errorf("get container error: %w", err) - } - return cnr.Value.Owner().Equals(actor), nil -} - -// base58Str generates base58 string. 
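// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// AddChain above falls back to a randomly generated chain ID when the client
// supplies none: the base58Str helper below base58-encodes n random bytes,
// so randomIDLength = 10 yields roughly 13-14 characters. A self-contained
// equivalent using the same libraries the deleted file imports:
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/mr-tron/base58/base58"
)

func randomChainID(n int) (string, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base58.FastBase58Encoding(b), nil
}

func main() {
	id, err := randomChainID(10)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // output varies per run
}
// ---------------------------------------------------------------------------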
-func base58Str(n int) (string, error) { - b := make([]byte, n) - _, err := rand.Read(b) - if err != nil { - return "", err - } - return base58.FastBase58Encoding(b), nil -} diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go deleted file mode 100644 index e624177ac..000000000 --- a/pkg/services/apemanager/server.go +++ /dev/null @@ -1,13 +0,0 @@ -package apemanager - -import ( - "context" - - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" -) - -type Server interface { - AddChain(context.Context, *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error) - RemoveChain(context.Context, *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error) - ListChains(context.Context, *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error) -} diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go deleted file mode 100644 index a172624ff..000000000 --- a/pkg/services/apemanager/sign.go +++ /dev/null @@ -1,49 +0,0 @@ -package apemanager - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" -) - -type signService struct { - sigSvc *util.SignService - - next Server -} - -func NewSignService(key *ecdsa.PrivateKey, next Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - next: next, - } -} - -func (s *signService) AddChain(ctx context.Context, req *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(apemanager_v2.AddChainResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.next.AddChain(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) RemoveChain(ctx context.Context, req *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(apemanager_v2.RemoveChainResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.next.RemoveChain(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) ListChains(ctx context.Context, req *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(apemanager_v2.ListChainsResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.next.ListChains(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} diff --git a/pkg/services/apemanager/validation.go b/pkg/services/apemanager/validation.go deleted file mode 100644 index b26fcf8ee..000000000 --- a/pkg/services/apemanager/validation.go +++ /dev/null @@ -1,23 +0,0 @@ -package apemanager - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" -) - -func decodeAndValidateChain(encodedChain []byte) (chain apechain.Chain, err error) { - if err = chain.DecodeBytes(encodedChain); err != nil { - return - } - for _, rule := range chain.Rules { - for _, name := range rule.Resources.Names { - if err = ape.ValidateResourceName(name); err != nil { - err = fmt.Errorf("invalid resource: %w", err) - return - } - } - } - return -} diff --git 
a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go deleted file mode 100644 index ac15dd107..000000000 --- a/pkg/services/common/ape/checker.go +++ /dev/null @@ -1,170 +0,0 @@ -package ape - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - - aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -var ( - errBearerExpired = errors.New("bearer token has expired") - errBearerInvalidSignature = errors.New("bearer token has invalid signature") - errBearerInvalidContainerID = errors.New("bearer token was created for another container") - errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") - errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") -) - -type CheckPrm struct { - // Request is an APE-request that is checked by policy engine. - Request aperequest.Request - - Namespace string - - Container cid.ID - - // An encoded container's owner user ID. - ContainerOwner user.ID - - // PublicKey is public key of the request sender. - PublicKey *keys.PublicKey - - // The request's bearer token. It is used in order to check APE overrides with the token. - BearerToken *bearer.Token -} - -// CheckCore provides methods to perform the common logic of APE check. -type CheckCore interface { - // CheckAPE performs the common policy-engine check logic on a prepared request. - CheckAPE(ctx context.Context, prm CheckPrm) error -} - -type checkerCoreImpl struct { - LocalOverrideStorage policyengine.LocalOverrideStorage - MorphChainStorage policyengine.MorphRuleChainStorageReader - FrostFSSubjectProvider frostfsidcore.SubjectProvider - State netmap.State -} - -func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, - frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State, -) CheckCore { - return &checkerCoreImpl{ - LocalOverrideStorage: localOverrideStorage, - MorphChainStorage: morphChainStorage, - FrostFSSubjectProvider: frostFSSubjectProvider, - State: state, - } -} - -// CheckAPE performs the common policy-engine check logic on a prepared request. 
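// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// Hypothetical use of the CheckCore above: construct it once with its four
// storage/state dependencies, then call CheckAPE per request with a prepared
// CheckPrm. Every variable below is a placeholder for a real dependency.
//
//	checker := ape.New(localOverrides, morphChains, subjectProvider, netmapState)
//	err := checker.CheckAPE(ctx, ape.CheckPrm{
//		Request:        apeRequest, // prepared aperequest.Request
//		Namespace:      namespace,
//		Container:      cnrID,
//		ContainerOwner: ownerID,
//		PublicKey:      senderKey,
//		BearerToken:    nil, // or the request's bearer token
//	})
//	var chErr *ape.ChainRouterError // defined in error.go below
//	if errors.As(err, &chErr) {
//		// the request was denied, or no matching rule was found
//	}
// ---------------------------------------------------------------------------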
-func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
-	var cr policyengine.ChainRouter
-	if prm.BearerToken != nil {
-		var err error
-		if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
-			return fmt.Errorf("bearer validation error: %w", err)
-		}
-		if prm.BearerToken.Impersonate() {
-			cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
-		} else {
-			override, isSet := prm.BearerToken.APEOverride()
-			if !isSet {
-				return errors.New("expected an APE override within the bearer token")
-			}
-			cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
-			if err != nil {
-				return fmt.Errorf("create chain router error: %w", err)
-			}
-		}
-	} else {
-		cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
-	}
-
-	groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
-	if err != nil {
-		return fmt.Errorf("failed to get group ids: %w", err)
-	}
-
-	// Policy contract keeps group related chains as namespace-group pair.
-	for i := range groups {
-		groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
-	}
-
-	rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups)
-	status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request)
-	if err != nil {
-		return err
-	}
-	if found && status == apechain.Allow {
-		return nil
-	}
-	return newChainRouterError(prm.Request.Operation(), status)
-}
-
-// isValidBearer checks whether the bearer token was correctly signed by an
-// authorized entity. This method could be defined on the ACL service as a
-// whole, because it requires fetching the current epoch to check the token
-// lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
-	if token == nil {
-		return nil
-	}
-
-	// First, check the token lifetime. This is the simplest verification.
-	if token.InvalidAt(st.CurrentEpoch()) {
-		return errBearerExpired
-	}
-
-	// Then check whether the bearer token is signed correctly.
-	if !token.VerifySignature() {
-		return errBearerInvalidSignature
-	}
-
-	// Check APE overrides defined in the bearer token.
-	if apeOverride, isSet := token.APEOverride(); isSet {
-		switch apeOverride.Target.TargetType {
-		case ape.TargetTypeContainer:
-			var targetCnr cid.ID
-			err := targetCnr.DecodeString(apeOverride.Target.Name)
-			if err != nil {
-				return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
-			}
-			if !cntID.Equals(targetCnr) {
-				return errBearerInvalidContainerID
-			}
-		default:
-		}
-	}
-
-	// Skip the remaining ownership checks if the token is impersonated.
-	if token.Impersonate() {
-		return nil
-	}
-
-	// Then check whether the container owner signed this token.
-	if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
-		return errBearerNotSignedByOwner
-	}
-
-	// Then check whether the request sender has the right to use this token.
- var usrSender user.ID - user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner - } - - return nil -} diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go deleted file mode 100644 index d3c381de7..000000000 --- a/pkg/services/common/ape/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package ape - -import ( - "fmt" - - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" -) - -// ChainRouterError is returned when chain router validation prevents -// the APE request from being processed (no rule found, access denied, etc.). -type ChainRouterError struct { - operation string - status apechain.Status -} - -func (e *ChainRouterError) Error() string { - return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status()) -} - -func (e *ChainRouterError) Operation() string { - return e.operation -} - -func (e *ChainRouterError) Status() apechain.Status { - return e.status -} - -func newChainRouterError(operation string, status apechain.Status) *ChainRouterError { - return &ChainRouterError{ - operation: operation, - status: status, - } -} diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go deleted file mode 100644 index 3b5dab9aa..000000000 --- a/pkg/services/container/ape.go +++ /dev/null @@ -1,756 +0,0 @@ -package container - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "net" - "strings" - - aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/peer" -) - -var ( - errMissingContainerID = errors.New("missing container ID") - errSessionContainerMissmatch = errors.New("requested container is not related to the session") - errMissingVerificationHeader = errors.New("malformed request: empty verification header") - errInvalidSessionTokenSignature = errors.New("malformed request: invalid session token signature") - errInvalidSessionTokenOwner = errors.New("malformed request: invalid session token owner") - errEmptyBodySignature = errors.New("malformed request: empty body signature") - errMissingOwnerID = errors.New("malformed request: missing owner ID") - errOwnerIDIsNotSet = errors.New("owner id is not set") - 
errInvalidDomainZone = errors.New("invalid domain zone: no namespace is expected") - - undefinedContainerID = cid.ID{} -) - -type ir interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) -} - -type containers interface { - Get(context.Context, cid.ID) (*containercore.Container, error) -} - -type apeChecker struct { - router policyengine.ChainRouter - reader containers - ir ir - nm netmap.Source - - frostFSIDClient frostfsidcore.SubjectProvider - - next Server -} - -func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm netmap.Source, frostFSIDClient frostfsidcore.SubjectProvider, srv Server) Server { - return &apeChecker{ - router: router, - reader: reader, - ir: ir, - next: srv, - nm: nm, - frostFSIDClient: frostFSIDClient, - } -} - -func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete") - defer span.End() - - if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), - nativeschema.MethodDeleteContainer); err != nil { - return nil, err - } - - return ac.next.Delete(ctx, req) -} - -func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Get") - defer span.End() - - if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), - nativeschema.MethodGetContainer); err != nil { - return nil, err - } - - return ac.next.Get(ctx, req) -} - -func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List") - defer span.End() - - role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) - if err != nil { - return nil, err - } - - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), - nativeschema.PropertyKeyActorRole: role, - } - - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) - if err != nil { - return nil, err - } - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - - namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) - if err != nil { - return nil, fmt.Errorf("could not get owner namespace: %w", err) - } - if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { - return nil, err - } - - request := aperequest.NewRequest( - nativeschema.MethodListContainers, - aperequest.NewResource( - resourceName(namespace, ""), - make(map[string]string), - ), - reqProps, - ) - - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) - if err != nil { - return nil, fmt.Errorf("failed to get group ids: %w", err) - } - - // Policy contract keeps group related chains as namespace-group pair. 
- for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) - } - - rt := policyengine.NewRequestTargetWithNamespace(namespace) - rt.User = &policyengine.Target{ - Type: policyengine.User, - Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), - } - rt.Groups = make([]policyengine.Target, len(groups)) - for i := range groups { - rt.Groups[i] = policyengine.GroupTarget(groups[i]) - } - - s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) - if err != nil { - return nil, err - } - - if found && s == apechain.Allow { - return ac.next.List(ctx, req) - } - - return nil, apeErr(nativeschema.MethodListContainers, s) -} - -func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error { - ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") - defer span.End() - - role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) - if err != nil { - return err - } - - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), - nativeschema.PropertyKeyActorRole: role, - } - - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) - if err != nil { - return err - } - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - - namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) - if err != nil { - return fmt.Errorf("could not get owner namespace: %w", err) - } - if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { - return err - } - - request := aperequest.NewRequest( - nativeschema.MethodListContainers, - aperequest.NewResource( - resourceName(namespace, ""), - make(map[string]string), - ), - reqProps, - ) - - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) - if err != nil { - return fmt.Errorf("failed to get group ids: %w", err) - } - - // Policy contract keeps group related chains as namespace-group pair. 
- for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) - } - - rt := policyengine.NewRequestTargetWithNamespace(namespace) - rt.User = &policyengine.Target{ - Type: policyengine.User, - Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), - } - rt.Groups = make([]policyengine.Target, len(groups)) - for i := range groups { - rt.Groups[i] = policyengine.GroupTarget(groups[i]) - } - - s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) - if err != nil { - return err - } - - if found && s == apechain.Allow { - return ac.next.ListStream(req, stream) - } - - return apeErr(nativeschema.MethodListContainers, s) -} - -func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") - defer span.End() - - role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) - if err != nil { - return nil, err - } - - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), - nativeschema.PropertyKeyActorRole: role, - } - - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) - if err != nil { - return nil, err - } - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - - namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID()) - if err != nil { - return nil, fmt.Errorf("get namespace error: %w", err) - } - if err = validateNamespace(req.GetBody().GetContainer(), namespace); err != nil { - return nil, err - } - - cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer()) - if err != nil { - return nil, fmt.Errorf("get container properties: %w", err) - } - - request := aperequest.NewRequest( - nativeschema.MethodPutContainer, - aperequest.NewResource( - resourceName(namespace, ""), - cnrProps, - ), - reqProps, - ) - - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) - if err != nil { - return nil, fmt.Errorf("failed to get group ids: %w", err) - } - - // Policy contract keeps group related chains as namespace-group pair. 
- for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) - } - - rt := policyengine.NewRequestTargetWithNamespace(namespace) - rt.User = &policyengine.Target{ - Type: policyengine.User, - Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), - } - rt.Groups = make([]policyengine.Target, len(groups)) - for i := range groups { - rt.Groups[i] = policyengine.GroupTarget(groups[i]) - } - - s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) - if err != nil { - return nil, err - } - - if found && s == apechain.Allow { - return ac.next.Put(ctx, req) - } - - return nil, apeErr(nativeschema.MethodPutContainer, s) -} - -func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { - if vh == nil { - return "", nil, errMissingVerificationHeader - } - - if oID == nil { - return "", nil, errMissingOwnerID - } - var ownerID user.ID - if err := ownerID.ReadFromV2(*oID); err != nil { - return "", nil, err - } - - actor, pk, err := ac.getActorAndPublicKey(mh, vh, undefinedContainerID) - if err != nil { - return "", nil, err - } - - if actor.Equals(ownerID) { - return nativeschema.PropertyValueContainerRoleOwner, pk, nil - } - - pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(ctx, pkBytes) - if err != nil { - return "", nil, err - } - if isIR { - return nativeschema.PropertyValueContainerRoleIR, pk, nil - } - - return nativeschema.PropertyValueContainerRoleOthers, pk, nil -} - -func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error { - if vh == nil { - return errMissingVerificationHeader - } - - id, err := getContainerID(containerID) - if err != nil { - return err - } - - cont, err := ac.reader.Get(ctx, id) - if err != nil { - return err - } - - reqProps, pk, err := ac.getRequestProps(ctx, mh, vh, cont, id) - if err != nil { - return err - } - - namespace := "" - cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cont.Value).Zone(), ".ns") - if hasNamespace { - namespace = cntNamespace - } - - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) - if err != nil { - return fmt.Errorf("failed to get group ids: %w", err) - } - - // Policy contract keeps group related chains as namespace-group pair. 
- for i := range groups { - groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) - } - - request := aperequest.NewRequest( - op, - aperequest.NewResource( - resourceName(namespace, id.EncodeToString()), - getContainerProps(cont), - ), - reqProps, - ) - - s, found, err := ac.router.IsAllowed(apechain.Ingress, - policyengine.NewRequestTargetExtended(namespace, id.EncodeToString(), fmt.Sprintf("%s:%s", namespace, pk.Address()), groups), - request) - if err != nil { - return err - } - - if found && s == apechain.Allow { - return nil - } - - return apeErr(op, s) -} - -func apeErr(operation string, status apechain.Status) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(fmt.Sprintf("access to container operation %s is denied by access policy engine: %s", operation, status.String())) - return errAccessDenied -} - -func getContainerID(reqContID *refs.ContainerID) (cid.ID, error) { - if reqContID == nil { - return cid.ID{}, errMissingContainerID - } - var id cid.ID - err := id.ReadFromV2(*reqContID) - if err != nil { - return cid.ID{}, fmt.Errorf("invalid container ID: %w", err) - } - return id, nil -} - -func resourceName(namespace string, container string) string { - if namespace == "" && container == "" { - return nativeschema.ResourceFormatRootContainers - } - if namespace == "" && container != "" { - return fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container) - } - if namespace != "" && container == "" { - return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, namespace) - } - return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container) -} - -func getContainerProps(c *containercore.Container) map[string]string { - props := map[string]string{ - nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(), - } - for attrName, attrVal := range c.Value.Attributes() { - name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName) - props[name] = attrVal - } - return props -} - -func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) { - if cnrV2 == nil { - return nil, errors.New("container is not set") - } - c := cnrSDK.Container{} - if err := c.ReadFromV2(*cnrV2); err != nil { - return nil, err - } - return getContainerProps(&containercore.Container{Value: c}), nil -} - -func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, - cont *containercore.Container, cnrID cid.ID, -) (map[string]string, *keys.PublicKey, error) { - actor, pk, err := ac.getActorAndPublicKey(mh, vh, cnrID) - if err != nil { - return nil, nil, err - } - role, err := ac.getRole(ctx, actor, pk, cont, cnrID) - if err != nil { - return nil, nil, err - } - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), - nativeschema.PropertyKeyActorRole: role, - } - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) - if err != nil { - return nil, nil, err - } - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - return reqProps, pk, nil -} - -func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { - if cont.Value.Owner().Equals(*actor) { - return nativeschema.PropertyValueContainerRoleOwner, nil - } - - pkBytes := pk.Bytes() - isIR, err := 
ac.isInnerRingKey(ctx, pkBytes) - if err != nil { - return "", err - } - if isIR { - return nativeschema.PropertyValueContainerRoleIR, nil - } - - isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont) - if err != nil { - return "", err - } - if isContainer { - return nativeschema.PropertyValueContainerRoleContainer, nil - } - - return nativeschema.PropertyValueContainerRoleOthers, nil -} - -func (ac *apeChecker) getActorAndPublicKey(mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) { - st, err := ac.getSessionToken(mh) - if err != nil { - return nil, nil, err - } - - if st != nil { - return ac.getActorAndPKFromSessionToken(st, cnrID) - } - return ac.getActorAndPKFromSignature(vh) -} - -func (ac *apeChecker) getActorAndPKFromSignature(vh *session.RequestVerificationHeader) (*user.ID, *keys.PublicKey, error) { - for vh.GetOrigin() != nil { - vh = vh.GetOrigin() - } - sig := vh.GetBodySignature() - if sig == nil { - return nil, nil, errEmptyBodySignature - } - key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var userID user.ID - user.IDFromKey(&userID, (ecdsa.PublicKey)(*key)) - - return &userID, key, nil -} - -func (ac *apeChecker) getSessionToken(mh *session.RequestMetaHeader) (*sessionSDK.Container, error) { - for mh.GetOrigin() != nil { - mh = mh.GetOrigin() - } - st := mh.GetSessionToken() - if st == nil { - return nil, nil - } - - var tok sessionSDK.Container - err := tok.ReadFromV2(*st) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - return &tok, nil -} - -func (ac *apeChecker) getActorAndPKFromSessionToken(st *sessionSDK.Container, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) { - if cnrID != undefinedContainerID && !st.AppliedTo(cnrID) { - return nil, nil, errSessionContainerMissmatch - } - if !st.VerifySignature() { - return nil, nil, errInvalidSessionTokenSignature - } - var tok session.Token - st.WriteToV2(&tok) - - signaturePublicKey, err := keys.NewPublicKeyFromBytes(tok.GetSignature().GetKey(), elliptic.P256()) - if err != nil { - return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err) - } - - tokenIssuer := st.Issuer() - if !isOwnerFromKey(tokenIssuer, signaturePublicKey) { - return nil, nil, errInvalidSessionTokenOwner - } - - return &tokenIssuer, signaturePublicKey, nil -} - -func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { - if key == nil { - return false - } - - var id2 user.ID - user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) - - return id2.Equals(id) -} - -func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) { - innerRingKeys, err := ac.ir.InnerRingKeys(ctx) - if err != nil { - return false, err - } - - for i := range innerRingKeys { - if bytes.Equal(innerRingKeys[i], pk) { - return true, nil - } - } - - return false, nil -} - -func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { - binCnrID := make([]byte, sha256.Size) - cnrID.Encode(binCnrID) - - nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm) - if err != nil { - return false, err - } - - if isContainerNode(nm, pk, binCnrID, cont) { - return true, nil - } - - // then check previous netmap, this can happen in-between epoch change - // when node migrates data from last epoch container - nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm) - if err != nil { - 
		return false, err
-	}
-
-	return isContainerNode(nm, pk, binCnrID, cont), nil
-}
-
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
-	// ContainerNodes could return an error only if the network map doesn't
-	// have enough nodes to fulfil the policy. It's a logical error that
-	// doesn't affect determining the actor role, so we ignore it.
-	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
-
-	for i := range cnrVectors {
-		for j := range cnrVectors[i] {
-			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
-	var ownerSDK user.ID
-	if owner == nil {
-		return "", errOwnerIDIsNotSet
-	}
-	if err := ownerSDK.ReadFromV2(*owner); err != nil {
-		return "", err
-	}
-	addr := ownerSDK.ScriptHash()
-
-	namespace := ""
-	subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
-	if err == nil {
-		namespace = subject.Namespace
-	} else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
-		return "", fmt.Errorf("get subject error: %w", err)
-	}
-	return namespace, nil
-}
-
-func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
-	var ownerSDK user.ID
-	if owner == nil {
-		return "", errOwnerIDIsNotSet
-	}
-	if err := ownerSDK.ReadFromV2(*owner); err != nil {
-		return "", err
-	}
-	addr := ownerSDK.ScriptHash()
-	subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
-	if err != nil {
-		return "", fmt.Errorf("get subject error: %w", err)
-	}
-	return subject.Namespace, nil
-}
-
-// validateNamespace validates the namespace set in a container.
-// If the frostfs-id contract stores a namespace N1 for an owner ID and a container within a request
-// is set with namespace N2 (via the Zone() property), then N2 is invalid and the request is denied.
-func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) error {
-	if cnrV2 == nil {
-		return nil
-	}
-	var cnr cnrSDK.Container
-	if err := cnr.ReadFromV2(*cnrV2); err != nil {
-		return err
-	}
-	cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr).Zone(), ".ns")
-	if hasNamespace {
-		if cntNamespace != ownerIDNamespace {
-			if ownerIDNamespace == "" {
-				return errInvalidDomainZone
-			}
-			return fmt.Errorf("invalid domain zone: expected namespace %s, but got %s", ownerIDNamespace, cntNamespace)
-		}
-	} else if ownerIDNamespace != "" {
-		return fmt.Errorf("invalid domain zone: expected namespace %s, but got invalid or empty", ownerIDNamespace)
-	}
-	return nil
-}
-
-// validateNamespaceByPublicKey checks that the namespace of the request actor
-// equals the owner's namespace. The actor's namespace is derived from its public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
-	var actor user.ID
-	user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
-	actorOwnerID := new(refs.OwnerID)
-	actor.WriteToV2(actorOwnerID)
-	actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
-	if err != nil {
-		return fmt.Errorf("could not get actor namespace: %w", err)
-	}
-	if actorNamespace != ownerIDNamespace {
-		return fmt.Errorf("actor namespace %s differs from owner's: %s", actorNamespace, ownerIDNamespace)
-	}
-	return nil
-}
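// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the deleted file.]
// The zone/namespace rule enforced by validateNamespace above, distilled:
// a container domain zone "<ns>.ns" must match the owner's frostfsid
// namespace, and a zone without the ".ns" suffix is only valid for the
// root (empty) namespace.
package main

import (
	"fmt"
	"strings"
)

// namespaceMatches mirrors the decision table of validateNamespace.
func namespaceMatches(zone, ownerNamespace string) bool {
	ns, hasSuffix := strings.CutSuffix(zone, ".ns")
	if hasSuffix {
		return ns == ownerNamespace
	}
	return ownerNamespace == ""
}

func main() {
	fmt.Println(namespaceMatches("dev.ns", "dev"))    // true
	fmt.Println(namespaceMatches("dev.ns", ""))       // false: no namespace expected
	fmt.Println(namespaceMatches("container", "dev")) // false: namespace "dev" expected
}
// ---------------------------------------------------------------------------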
-
-// fillWithUserClaimTags fills APE request properties with user claim tags,
-// fetching them from the frostfsid contract by the actor's public key.
-func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
-	if reqProps == nil {
-		reqProps = make(map[string]string)
-	}
-	props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
-	if err != nil {
-		return reqProps, err
-	}
-	for propertyName, propertyValue := range props {
-		reqProps[propertyName] = propertyValue
-	}
-	return reqProps, nil
-}
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
deleted file mode 100644
index 6438c34ca..000000000
--- a/pkg/services/container/ape_test.go
+++ /dev/null
@@ -1,1767 +0,0 @@
-package container
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"net"
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
-	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
-	session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
-	commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
-	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/stretchr/testify/require"
-	"google.golang.org/grpc/peer"
-)
-
-const (
-	testDomainName = "testdomainname"
-	testDomainZone = "testdomainname.ns"
-)
-
-func TestAPE(t *testing.T) {
-	t.Parallel()
-	t.Run("allow then deny get container", testAllowThenDenyGetContainerRuleDefined)
-	t.Run("allow by group id", TestAllowByGroupIDs)
-	t.Run("deny get container no rule found", testDenyGetContainerNoRuleFound)
-	t.Run("deny get container for others", testDenyGetContainerForOthers)
-	t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
-	t.Run("deny get container by IP", testDenyGetContainerByIP)
-	t.Run("deny get container by group id", testDenyGetContainerByGroupID)
-	t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
-	t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
-	t.Run("deny put container with invalid namespace", testDenyPutContainerInvalidNamespace)
-	t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
-	t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
-	t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
-	t.Run("deny put by container 
attribute rules", testDenyPutContainerSysZoneAttr) -} - -const ( - incomingIP = "192.92.33.1" -) - -func ctxWithPeerInfo() context.Context { - return peer.NewContext(context.Background(), &peer.Peer{ - Addr: &net.TCPAddr{ - IP: net.ParseIP(incomingIP), - Port: 41111, - }, - }) -} - -func testAllowThenDenyGetContainerRuleDefined(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - addDefaultAllowGetPolicy(t, router, contID) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - _, err = apeSrv.Get(context.Background(), req) - require.NoError(t, err) - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - }, - }, - }) - require.NoError(t, err) - - resp, err := apeSrv.Get(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - -func TestAllowByGroupIDs(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 1, - Name: "Group#1", - }, - }, - }, - }, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) 
- contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.GroupTarget(":1"), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: commonschema.PropertyKeyFrostFSIDGroupID, - Value: "1", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - resp, err := apeSrv.Get(context.Background(), req) - require.NotNil(t, resp) - require.NoError(t, err) -} - -func testDenyGetContainerNoRuleFound(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.NoRuleFound.String()) -} - -func testDenyGetContainerForOthers(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - 
testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyGetContainerByUserClaimTag(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, 
"tag-attr1"), - Value: "value100", - Op: chain.CondStringNotEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyGetContainerByIP(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: commonschema.PropertyKeyFrostFSSourceIP, - Value: incomingIP + "/16", - Op: chain.CondIPAddress, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(ctxWithPeerInfo(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - -func testDenyGetContainerSysZoneAttr(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: 
map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - testContainer.SetAttribute(container.SysAttributeZone, "eggplant") - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindResource, - Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), - Value: "eggplant", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(ctxWithPeerInfo(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - -func testDenyPutContainerSysZoneAttr(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - testContainer.SetAttribute(container.SysAttributeZone, "eggplant") - contRdr.c[contID] = &containercore.Container{Value: testContainer} - owner := testContainer.Owner() - ownerAddr := owner.ScriptHash() - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - ownerAddr: {}, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - ownerAddr: {}, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err := 
router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodPutContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatRootContainers, - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindResource, - Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), - Value: "eggplant", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initPutRequest(t, testContainer) - - resp, err := apeSrv.Put(ctxWithPeerInfo(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - -func testDenyGetContainerByGroupID(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: commonschema.PropertyKeyFrostFSIDGroupID, - Value: "19888", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyPutContainerForOthersSessionToken(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - 
router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - testContainer := containertest.Container() - owner := testContainer.Owner() - ownerAddr := owner.ScriptHash() - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - ownerAddr: {}, - }, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodPutContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatRootContainers, - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initPutRequest(t, testContainer) - - resp, err := apeSrv.Put(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - cnrID, testContainer := initTestContainer(t, true) - contRdr.c[cnrID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodPutContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(testContainer) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - resp, err := apeSrv.Put(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyPutContainerInvalidNamespace(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ 
- c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - cnrID, testContainer := initTestContainer(t, false) - var domain cnrSDK.Domain - domain.SetName("incorrect" + testDomainName) - domain.SetZone("incorrect" + testDomainZone) - cnrSDK.WriteDomain(&testContainer, domain) - contRdr.c[cnrID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodPutContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(testContainer) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - resp, err := apeSrv.Put(context.Background(), req) - require.Nil(t, resp) - require.ErrorContains(t, err, "invalid domain zone") -} - -func testDenyListContainersForPK(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - } - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodListContainers, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatRootContainers, - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorPublicKey, - Value: hex.EncodeToString(pk.PublicKey().Bytes()), - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - var userID user.ID - user.IDFromKey(&userID, pk.PrivateKey.PublicKey) - - req := &container.ListRequest{} - req.SetBody(&container.ListRequestBody{}) - var ownerID refs.OwnerID - userID.WriteToV2(&ownerID) - req.GetBody().SetOwnerID(&ownerID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.List(context.Background(), req) - require.Nil(t, resp) - var errAccessDenied 
*apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) -} - -func testDenyListContainersValidationNamespaceError(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - actorPK, err := keys.NewPrivateKey() - require.NoError(t, err) - - ownerPK, err := keys.NewPrivateKey() - require.NoError(t, err) - - actorScriptHash, ownerScriptHash := initActorOwnerScriptHashes(t, actorPK, ownerPK) - - const actorDomain = "actor" + testDomainName - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - actorScriptHash: { - Namespace: actorDomain, - Name: actorDomain, - }, - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - actorScriptHash: { - Namespace: actorDomain, - Name: actorDomain, - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19777, - }, - }, - }, - ownerScriptHash: { - Namespace: testDomainName, - Name: testDomainName, - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodListContainers, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatRootContainers, - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorPublicKey, - Value: actorPK.PublicKey().String(), - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initListRequest(t, actorPK, ownerPK) - - resp, err := apeSrv.List(context.Background(), req) - require.Nil(t, resp) - require.ErrorContains(t, err, "actor namespace "+actorDomain+" differs") -} - -type srvStub struct { - calls map[string]int -} - -func (s *srvStub) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) { - s.calls["Delete"]++ - return &container.DeleteResponse{}, nil -} - -func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetResponse, error) { - s.calls["Get"]++ - return &container.GetResponse{}, nil -} - -func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) { - s.calls["List"]++ - return &container.ListResponse{}, nil -} - -func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error { - s.calls["ListStream"]++ - return nil -} - -func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) { - s.calls["Put"]++ - return &container.PutResponse{}, nil -} - -type irStub struct { - keys [][]byte -} - -func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) { - return s.keys, nil -} - -type containerStub struct { - c map[cid.ID]*containercore.Container -} - -func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) { - if v, ok := s.c[id]; ok { - return v, nil - } - 
return nil, errors.New("container not found") -} - -type netmapStub struct { - netmaps map[uint64]*netmap.NetMap - currentEpoch uint64 -} - -func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.currentEpoch { - return nil, errors.New("invalid diff") - } - return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) -} - -func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.netmaps[epoch]; found { - return nm, nil - } - return nil, errors.New("netmap not found") -} - -func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { - return s.currentEpoch, nil -} - -type frostfsidStub struct { - subjects map[util.Uint160]*client.Subject - subjectsExt map[util.Uint160]*client.SubjectExtended -} - -func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) { - s, ok := f.subjects[owner] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return s, nil -} - -func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) { - s, ok := f.subjectsExt[owner] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return s, nil -} - -type testAPEServer struct { - engine engine.Engine - - containerReader *containerStub - - ir *irStub - - netmap *netmapStub - - frostfsIDSubjectReader *frostfsidStub - - apeChecker *apeChecker -} - -func newTestAPEServer() testAPEServer { - srv := &srvStub{ - calls: map[string]int{}, - } - - engine := inmemory.NewInMemory() - - containerReader := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - - ir := &irStub{ - keys: [][]byte{}, - } - - netmap := &netmapStub{} - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{}, - subjectsExt: map[util.Uint160]*client.SubjectExtended{}, - } - - apeChecker := &apeChecker{ - router: engine, - reader: containerReader, - ir: ir, - nm: netmap, - frostFSIDClient: frostfsIDSubjectReader, - next: srv, - } - - return testAPEServer{ - engine: engine, - containerReader: containerReader, - ir: ir, - netmap: netmap, - frostfsIDSubjectReader: frostfsIDSubjectReader, - apeChecker: apeChecker, - } -} - -func TestValidateContainerBoundedOperation(t *testing.T) { - t.Parallel() - - t.Run("check root-defined container in root-defined container target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, false) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), 
req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied) - require.ErrorContains(t, err, aErr.Error()) - }) - - t.Run("check root-defined container in testdomain-defined container target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, false) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - addDefaultAllowGetPolicy(t, components.engine, contID) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - require.NoError(t, err) - }) - - t.Run("check root-defined container in testdomain namespace target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, false) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - addDefaultAllowGetPolicy(t, components.engine, contID) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - require.NoError(t, err) - }) - - t.Run("check testdomain-defined container in root-defined container target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, true) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: 
[]string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - addDefaultAllowGetPolicy(t, components.engine, contID) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - require.NoError(t, err) - }) - - t.Run("check testdomain-defined container in testdomain-defined container target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, true) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - addDefaultAllowGetPolicy(t, components.engine, contID) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied) - require.ErrorContains(t, err, aErr.Error()) - }) - - t.Run("check testdomain-defined container in testdomain namespace target rule", func(t *testing.T) { - t.Parallel() - - components := newTestAPEServer() - contID, testContainer := initTestContainer(t, true) - components.containerReader.c[contID] = &containercore.Container{Value: testContainer} - initTestNetmap(components.netmap) - - _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initTestGetContainerRequest(t, contID) - - err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer) - aErr := 
apeErr(nativeschema.MethodGetContainer, chain.AccessDenied) - require.ErrorContains(t, err, aErr.Error()) - }) -} - -func initTestGetContainerRequest(t *testing.T, contID cid.ID) *container.GetRequest { - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - return req -} - -func initTestNetmap(netmapStub *netmapStub) { - netmapStub.currentEpoch = 100 - netmapStub.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(netmapStub.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - netmapStub.netmaps[netmapStub.currentEpoch] = &testNetmap - netmapStub.netmaps[netmapStub.currentEpoch-1] = &testNetmap -} - -func initTestContainer(t *testing.T, isDomainSet bool) (cid.ID, cnrSDK.Container) { - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - if isDomainSet { - // domain is set -> the container is defined in the test domain namespace (with no domain it would stay in the root namespace) - var domain cnrSDK.Domain - domain.SetName(testDomainName) - domain.SetZone(testDomainZone) - cnrSDK.WriteDomain(&testContainer, domain) - } - return contID, testContainer -} - -func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.PutRequest { - req := &container.PutRequest{} - req.SetBody(&container.PutRequestBody{}) - var reqCont container.Container - testContainer.WriteToV2(&reqCont) - req.GetBody().SetContainer(&reqCont) - - sessionPK, err := keys.NewPrivateKey() - require.NoError(t, err) - sToken := sessiontest.ContainerSigned() - sToken.ApplyOnlyTo(cid.ID{}) - require.NoError(t, sToken.Sign(sessionPK.PrivateKey)) - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetSessionToken(&sTokenV2) - req.SetMetaHeader(metaHeader) - - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - return req -} - -func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 { - var ownerSDK *user.ID - owner := testContainer.Owner() - ownerSDK = &owner - return ownerSDK.ScriptHash() -} - -func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) { - var actorUserID user.ID - user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey())) - actorScriptHash = actorUserID.ScriptHash() - - var ownerUserID user.ID - user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey())) - ownerScriptHash = ownerUserID.ScriptHash() - require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String()) - return -} - -func initListRequest(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) *container.ListRequest { - var ownerUserID user.ID - user.IDFromKey(&ownerUserID, ownerPK.PrivateKey.PublicKey) - - req := &container.ListRequest{} - req.SetBody(&container.ListRequestBody{}) - var ownerID refs.OwnerID - ownerUserID.WriteToV2(&ownerID) - req.GetBody().SetOwnerID(&ownerID) - - require.NoError(t, signature.SignServiceMessage(&actorPK.PrivateKey, req)) - return req -} - -func addDefaultAllowGetPolicy(t *testing.T, e engine.Engine, contID cid.ID) { - _, _, err := 
e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatAllContainers, - }, - }, - }, - }, - }) - require.NoError(t, err) -} diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go deleted file mode 100644 index b235efa3c..000000000 --- a/pkg/services/container/audit.go +++ /dev/null @@ -1,86 +0,0 @@ -package container - -import ( - "context" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -var _ Server = (*auditService)(nil) - -type auditService struct { - next Server - log *logger.Logger - enabled *atomic.Bool -} - -func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server { - return &auditService{ - next: next, - log: log, - enabled: enabled, - } -} - -// Delete implements Server. -func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - res, err := a.next.Delete(ctx, req) - if !a.enabled.Load() { - return res, err - } - - audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) - - return res, err -} - -// Get implements Server. -func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - res, err := a.next.Get(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) - return res, err -} - -// List implements Server. -func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - res, err := a.next.List(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) - return res, err -} - -// ListStream implements Server. -func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error { - err := a.next.ListStream(req, stream) - if !a.enabled.Load() { - return err - } - audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) - return err -} - -// Put implements Server. 
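// Note that Put, unlike Delete/Get/List above, takes the audited container ID
// from the *response* body (res.GetBody().GetContainerID()): a new container
// has no ID until creation succeeds. Also common to every method here: the
// enabled flag is an *atomic.Bool read on each call, so audit logging can be
// toggled at runtime without recreating the service.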
-func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - res, err := a.next.Put(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req, - audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil) - return res, err -} diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go deleted file mode 100644 index cdd0d2514..000000000 --- a/pkg/services/container/executor.go +++ /dev/null @@ -1,104 +0,0 @@ -package container - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" -) - -type ServiceExecutor interface { - Put(context.Context, *session.Token, *container.PutRequestBody) (*container.PutResponseBody, error) - Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error) - Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error) - List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error) - ListStream(context.Context, *container.ListStreamRequest, ListStream) error -} - -type executorSvc struct { - Server - - exec ServiceExecutor - - respSvc *response.Service -} - -// NewExecutionService wraps ServiceExecutor and returns Container Service interface. -func NewExecutionService(exec ServiceExecutor, respSvc *response.Service) Server { - return &executorSvc{ - exec: exec, - respSvc: respSvc, - } -} - -func (s *executorSvc) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - meta := req.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - respBody, err := s.exec.Put(ctx, meta.GetSessionToken(), req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute Put request: %w", err) - } - - resp := new(container.PutResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - meta := req.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - respBody, err := s.exec.Delete(ctx, meta.GetSessionToken(), req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute Delete request: %w", err) - } - - resp := new(container.DeleteResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - respBody, err := s.exec.Get(ctx, req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute Get request: %w", err) - } - - resp := new(container.GetResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - respBody, err := s.exec.List(ctx, req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute List request: %w", err) - } - - resp := new(container.ListResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream 
ListStream) error { - err := s.exec.ListStream(stream.Context(), req, stream) - if err != nil { - return fmt.Errorf("could not execute ListStream request: %w", err) - } - return nil -} diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go deleted file mode 100644 index eaa608eba..000000000 --- a/pkg/services/container/morph/executor.go +++ /dev/null @@ -1,256 +0,0 @@ -package container - -import ( - "context" - "errors" - "fmt" - - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -var errMissingUserID = errors.New("missing user ID") - -type morphExecutor struct { - rdr Reader - wrt Writer -} - -// Reader is an interface of read-only container storage. -type Reader interface { - containercore.Source - - // ContainersOf returns a list of container identifiers belonging - // to the specified user of FrostFS system. Returns the identifiers - // of all FrostFS containers if pointer to owner identifier is nil. - ContainersOf(context.Context, *user.ID) ([]cid.ID, error) - IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error -} - -// Writer is an interface of container storage updater. -type Writer interface { - // Put stores specified container in the side chain. - Put(context.Context, containercore.Container) (*cid.ID, error) - // Delete removes specified container from the side chain. 
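// The removal witness bundles everything the sidechain needs to verify the
// operation: the container ID, a signature authorizing the removal and, when
// removal runs under a session, the session token (see how morphExecutor.Delete
// fills containercore.RemovalWitness below).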
- Delete(context.Context, containercore.RemovalWitness) error -} - -func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor { - return &morphExecutor{ - rdr: rdr, - wrt: wrt, - } -} - -func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) { - sigV2 := body.GetSignature() - if sigV2 == nil { - // TODO(@cthulhu-rider): #468 use "const" error - return nil, errors.New("missing signature") - } - - cnrV2 := body.GetContainer() - if cnrV2 == nil { - return nil, errors.New("missing container field") - } - - var cnr containercore.Container - - err := cnr.Value.ReadFromV2(*cnrV2) - if err != nil { - return nil, fmt.Errorf("invalid container: %w", err) - } - - err = cnr.Signature.ReadFromV2(*sigV2) - if err != nil { - return nil, fmt.Errorf("can't read signature: %w", err) - } - - if tokV2 != nil { - cnr.Session = new(session.Container) - - err := cnr.Session.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - } - - idCnr, err := s.wrt.Put(ctx, cnr) - if err != nil { - return nil, err - } - - var idCnrV2 refs.ContainerID - idCnr.WriteToV2(&idCnrV2) - - res := new(container.PutResponseBody) - res.SetContainerID(&idCnrV2) - - return res, nil -} - -func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) { - idV2 := body.GetContainerID() - if idV2 == nil { - return nil, errors.New("missing container ID") - } - - var id cid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid container ID: %w", err) - } - - var tok *session.Container - - if tokV2 != nil { - tok = new(session.Container) - - err := tok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - } - - var rmWitness containercore.RemovalWitness - - rmWitness.ContainerID = id - rmWitness.Signature = body.GetSignature() - rmWitness.SessionToken = tok - - err = s.wrt.Delete(ctx, rmWitness) - if err != nil { - return nil, err - } - - return new(container.DeleteResponseBody), nil -} - -func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { - idV2 := body.GetContainerID() - if idV2 == nil { - return nil, errors.New("missing container ID") - } - - var id cid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid container ID: %w", err) - } - - cnr, err := s.rdr.Get(ctx, id) - if err != nil { - return nil, err - } - - sigV2 := new(refs.Signature) - cnr.Signature.WriteToV2(sigV2) - - var tokV2 *sessionV2.Token - - if cnr.Session != nil { - tokV2 = new(sessionV2.Token) - - cnr.Session.WriteToV2(tokV2) - } - - var cnrV2 container.Container - cnr.Value.WriteToV2(&cnrV2) - - res := new(container.GetResponseBody) - res.SetContainer(&cnrV2) - res.SetSignature(sigV2) - res.SetSessionToken(tokV2) - - return res, nil -} - -func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { - idV2 := body.GetOwnerID() - if idV2 == nil { - return nil, errMissingUserID - } - - var id user.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid user ID: %w", err) - } - - cnrs, err := s.rdr.ContainersOf(ctx, &id) - if err != nil { - return nil, err - } - - cidList := make([]refs.ContainerID, len(cnrs)) - for i := range cnrs { - cnrs[i].WriteToV2(&cidList[i]) - } - - res := 
new(container.ListResponseBody) - res.SetContainerIDs(cidList) - - return res, nil -} - -func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { - body := req.GetBody() - idV2 := body.GetOwnerID() - if idV2 == nil { - return errMissingUserID - } - - var id user.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return fmt.Errorf("invalid user ID: %w", err) - } - - resBody := new(container.ListStreamResponseBody) - r := new(container.ListStreamResponse) - r.SetBody(resBody) - - var cidList []refs.ContainerID - - // Amount of containers to send at once. - const batchSize = 1000 - - processCID := func(id cid.ID) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var refID refs.ContainerID - id.WriteToV2(&refID) - cidList = append(cidList, refID) - if len(cidList) == batchSize { - r.GetBody().SetContainerIDs(cidList) - cidList = cidList[:0] - return stream.Send(r) - } - return nil - } - - if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil { - return err - } - - if len(cidList) > 0 { - r.GetBody().SetContainerIDs(cidList) - return stream.Send(r) - } - - return nil -} diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go deleted file mode 100644 index 1f6fdb0be..000000000 --- a/pkg/services/container/morph/executor_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package container_test - -import ( - "context" - "testing" - - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" - containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type mock struct { - containerSvcMorph.Reader -} - -func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) { - return new(cid.ID), nil -} - -func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error { - return nil -} - -func TestInvalidToken(t *testing.T) { - m := mock{} - e := containerSvcMorph.NewExecutor(m, m) - - cnr := cidtest.ID() - - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - - sign := func(reqBody interface { - StableMarshal([]byte) []byte - SetSignature(signature *refs.Signature) - }, - ) { - signer := frostfsecdsa.Signer(priv.PrivateKey) - var sig frostfscrypto.Signature - require.NoError(t, sig.Calculate(signer, reqBody.StableMarshal(nil))) - - var sigV2 refs.Signature - sig.WriteToV2(&sigV2) - reqBody.SetSignature(&sigV2) - } - - var tokV2 session.Token - sessiontest.ContainerSigned().WriteToV2(&tokV2) - - tests := []struct { - name string - op func(e containerSvc.ServiceExecutor, tokV2 *session.Token) error - }{ - { - name: 
"put", - op: func(e containerSvc.ServiceExecutor, tokV2 *session.Token) (err error) { - var reqBody container.PutRequestBody - - cnr := containertest.Container() - - var cnrV2 container.Container - cnr.WriteToV2(&cnrV2) - - reqBody.SetContainer(&cnrV2) - sign(&reqBody) - - _, err = e.Put(context.TODO(), tokV2, &reqBody) - return - }, - }, - { - name: "delete", - op: func(e containerSvc.ServiceExecutor, tokV2 *session.Token) (err error) { - var reqBody container.DeleteRequestBody - reqBody.SetContainerID(&cnrV2) - - _, err = e.Delete(context.TODO(), tokV2, &reqBody) - return - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - tok := generateToken(new(session.ObjectSessionContext)) - require.Error(t, test.op(e, tok)) - - require.NoError(t, test.op(e, &tokV2)) - - require.NoError(t, test.op(e, nil)) - }) - } -} - -func generateToken(ctx session.TokenContext) *session.Token { - body := new(session.TokenBody) - body.SetContext(ctx) - - tok := new(session.Token) - tok.SetBody(body) - - return tok -} diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go deleted file mode 100644 index d9208077d..000000000 --- a/pkg/services/container/server.go +++ /dev/null @@ -1,23 +0,0 @@ -package container - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" -) - -// Server is an interface of the FrostFS API Container service server. -type Server interface { - Put(context.Context, *container.PutRequest) (*container.PutResponse, error) - Get(context.Context, *container.GetRequest) (*container.GetResponse, error) - Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) - List(context.Context, *container.ListRequest) (*container.ListResponse, error) - ListStream(*container.ListStreamRequest, ListStream) error -} - -// ListStream is an interface of FrostFS API v2 compatible search streamer. 
-type ListStream interface { - util.ServerStream - Send(*container.ListStreamResponse) error -} diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go deleted file mode 100644 index 85fe7ae87..000000000 --- a/pkg/services/container/sign.go +++ /dev/null @@ -1,95 +0,0 @@ -package container - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" -) - -type signService struct { - sigSvc *util.SignService - - svc Server -} - -func NewSignService(key *ecdsa.PrivateKey, svc Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *signService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.PutResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Put(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.DeleteResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Delete(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.GetResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Get(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.ListResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(container.ListStreamResponse) - _ = s.sigSvc.SignResponse(resp, err) - return stream.Send(resp) - } - - ss := &listStreamSigner{ - ListStream: stream, - sigSvc: s.sigSvc, - } - err := s.svc.ListStream(req, ss) - if err != nil || !ss.nonEmptyResp { - return ss.send(new(container.ListStreamResponse), err) - } - return nil -} - -type listStreamSigner struct { - ListStream - sigSvc *util.SignService - - nonEmptyResp bool // set on first Send call -} - -func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error { - s.nonEmptyResp = true - return s.send(resp, nil) -} - -func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error { - if err := s.sigSvc.SignResponse(resp, err); err != nil { - return err - } - return s.ListStream.Send(resp) -} diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go deleted file mode 100644 index 4f8708da7..000000000 --- a/pkg/services/container/transport_splitter.go +++ /dev/null @@ -1,92 +0,0 @@ -package container - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" -) - -type ( - TransportSplitter struct { - next Server - - respSvc *response.Service - cnrAmount uint32 - } - - listStreamMsgSizeCtrl struct { - util.ServerStream - stream ListStream - respSvc *response.Service - cnrAmount uint32 - } -) - -func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server { - return &TransportSplitter{ - next: next, - respSvc: respSvc, - cnrAmount: cnrAmount, - } -} - -func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - return s.next.Put(ctx, req) -} - -func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - return s.next.Delete(ctx, req) -} - -func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - return s.next.Get(ctx, req) -} - -func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - return s.next.List(ctx, req) -} - -func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error { - return s.next.ListStream(req, &listStreamMsgSizeCtrl{ - ServerStream: stream, - stream: stream, - respSvc: s.respSvc, - cnrAmount: s.cnrAmount, - }) -} - -func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error { - s.respSvc.SetMeta(resp) - body := resp.GetBody() - ids := body.GetContainerIDs() - - var newResp *container.ListStreamResponse - - for { - if newResp == nil { - newResp = new(container.ListStreamResponse) - newResp.SetBody(body) - } - - cut := min(s.cnrAmount, uint32(len(ids))) - - body.SetContainerIDs(ids[:cut]) - newResp.SetMetaHeader(resp.GetMetaHeader()) - newResp.SetVerificationHeader(resp.GetVerificationHeader()) - - if err := s.stream.Send(newResp); err != nil { - return fmt.Errorf("TransportSplitter: %w", err) - } - - ids = ids[cut:] - - if len(ids) == 0 { - break - } - } - - return nil -} diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go deleted file mode 100644 index 37daf67be..000000000 --- a/pkg/services/control/convert.go +++ /dev/null @@ -1,39 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" -) - -type requestWrapper struct { - message.Message - m grpc.Message -} - -func (w *requestWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -type responseWrapper[T grpc.Message] struct { - message *T -} - -func newResponseWrapper[T grpc.Message]() *responseWrapper[T] { - return &responseWrapper[T]{ - message: new(T), - } -} - -func (w *responseWrapper[T]) ToGRPCMessage() grpc.Message { - return w.message -} - -func (w *responseWrapper[T]) FromGRPCMessage(m grpc.Message) error { - response, ok := m.(*T) - if !ok { - return message.NewUnexpectedMessageType(m, w.message) - } - - w.message = response - return nil -} diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go deleted file mode 100644 index 024676b87..000000000 --- a/pkg/services/control/ir/convert.go +++ /dev/null @@ -1,34 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" -) - -type requestWrapper struct { - message.Message - m grpc.Message -} - 
-func (w *requestWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -type responseWrapper[M grpc.Message] struct { - m M -} - -func (w *responseWrapper[M]) ToGRPCMessage() grpc.Message { - return w.m -} - -func (w *responseWrapper[M]) FromGRPCMessage(m grpc.Message) error { - var ok bool - - w.m, ok = m.(M) - if !ok { - return message.NewUnexpectedMessageType(m, w.m) - } - - return nil -} diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go deleted file mode 100644 index 62f800d99..000000000 --- a/pkg/services/control/ir/rpc.go +++ /dev/null @@ -1,68 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" -) - -const serviceName = "ircontrol.ControlService" - -const ( - rpcHealthCheck = "HealthCheck" - rpcTickEpoch = "TickEpoch" - rpcRemoveNode = "RemoveNode" - rpcRemoveContainer = "RemoveContainer" -) - -// HealthCheck executes ControlService.HealthCheck RPC. -func HealthCheck( - cli *client.Client, - req *HealthCheckRequest, - opts ...client.CallOption, -) (*HealthCheckResponse, error) { - return sendUnary[HealthCheckRequest, HealthCheckResponse](cli, rpcHealthCheck, req, opts...) -} - -// TickEpoch executes ControlService.TickEpoch RPC. -func TickEpoch( - cli *client.Client, - req *TickEpochRequest, - opts ...client.CallOption, -) (*TickEpochResponse, error) { - return sendUnary[TickEpochRequest, TickEpochResponse](cli, rpcTickEpoch, req, opts...) -} - -func RemoveNode( - cli *client.Client, - req *RemoveNodeRequest, - opts ...client.CallOption, -) (*RemoveNodeResponse, error) { - return sendUnary[RemoveNodeRequest, RemoveNodeResponse](cli, rpcRemoveNode, req, opts...) -} - -func RemoveContainer( - cli *client.Client, - req *RemoveContainerRequest, - opts ...client.CallOption, -) (*RemoveContainerResponse, error) { - return sendUnary[RemoveContainerRequest, RemoveContainerResponse](cli, rpcRemoveContainer, req, opts...) -} - -func sendUnary[I, O grpc.Message](cli *client.Client, rpcName string, req *I, opts ...client.CallOption) (*O, error) { - var resp O - wResp := &responseWrapper[*O]{ - m: &resp, - } - - wReq := &requestWrapper{ - m: req, - } - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcName), wReq, wResp, opts...) 
- if err != nil {
- return nil, err
- }
-
- return wResp.m, nil
-}
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
deleted file mode 100644
index d9f65a2fc..000000000
--- a/pkg/services/control/ir/server/audit.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package control
-
-import (
- "context"
- "encoding/hex"
- "strings"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-var _ control.ControlServiceServer = (*auditService)(nil)
-
-type auditService struct {
- next *Server
- log *logger.Logger
- enabled *atomic.Bool
-}
-
-func NewAuditService(next *Server, log *logger.Logger, enabled *atomic.Bool) control.ControlServiceServer {
- return &auditService{
- next: next,
- log: log,
- enabled: enabled,
- }
-}
-
-// HealthCheck implements control.ControlServiceServer.
-func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
- res, err := a.next.HealthCheck(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
- return res, err
-}
-
-// RemoveContainer implements control.ControlServiceServer.
-func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
- res, err := a.next.RemoveContainer(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- sb := &strings.Builder{}
- var withContainer bool
- if len(req.GetBody().GetContainerId()) > 0 {
- withContainer = true
- sb.WriteString("containerID:")
- var containerID cid.ID
- if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- sb.WriteString(containerID.EncodeToString())
- }
- }
-
- if len(req.GetBody().GetOwner()) > 0 {
- if withContainer {
- sb.WriteString(";")
- }
- sb.WriteString("owner:")
-
- var ownerID refs.OwnerID
- if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- var owner user.ID
- if err := owner.ReadFromV2(ownerID); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- sb.WriteString(owner.EncodeToString())
- }
- }
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
- return res, err
-}
-
-// RemoveNode implements control.ControlServiceServer.
-func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
- res, err := a.next.RemoveNode(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
- audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
- return res, err
-}
-
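The auditService above is a plain decorator: every method forwards to the wrapped Server, then emits an audit record only when the atomic flag is set, so audit logging can be toggled at runtime without rebuilding the gRPC server. A minimal, self-contained sketch of the same pattern, using toy stand-in types rather than the real frostfs-node ones:

package main

import (
	"fmt"
	"sync/atomic"
)

// Toy stand-in: the real code decorates control.ControlServiceServer.
type HealthChecker interface{ HealthCheck() string }

type server struct{}

func (server) HealthCheck() string { return "READY" }

type auditDecorator struct {
	next    HealthChecker
	enabled *atomic.Bool
}

// HealthCheck forwards the call first, then logs only if auditing is
// enabled, mirroring the forward-then-log order of the methods above.
func (a auditDecorator) HealthCheck() string {
	res := a.next.HealthCheck()
	if a.enabled.Load() {
		fmt.Println("audit: HealthCheck ->", res)
	}
	return res
}

func main() {
	enabled := new(atomic.Bool)
	enabled.Store(true)
	var hc HealthChecker = auditDecorator{next: server{}, enabled: enabled}
	hc.HealthCheck() // audited
	enabled.Store(false)
	hc.HealthCheck() // silent
}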
-// TickEpoch implements control.ControlServiceServer.
-func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
- res, err := a.next.TickEpoch(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
- nil, err == nil)
- return res, err
-}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
deleted file mode 100644
index 0509d2646..000000000
--- a/pkg/services/control/ir/server/calls.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package control
-
-import (
- "bytes"
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// HealthCheck returns the health status of the local IR node.
-//
-// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- resp := new(control.HealthCheckResponse)
-
- body := new(control.HealthCheckResponse_Body)
- resp.SetBody(body)
-
- body.SetHealthStatus(s.prm.healthChecker.HealthStatus())
-
- if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
-
-// TickEpoch forces a new epoch.
-//
-// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- resp := new(control.TickEpochResponse)
- resp.SetBody(new(control.TickEpochResponse_Body))
-
- epoch, err := s.netmapClient.Epoch(ctx)
- if err != nil {
- return nil, fmt.Errorf("getting current epoch: %w", err)
- }
-
- vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
- if err != nil {
- return nil, fmt.Errorf("forcing new epoch: %w", err)
- }
- resp.Body.Vub = vub
-
- if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
-
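Both endpoints above follow the same contract: the caller signs the request body, the server checks the signing key against its white list, and the response comes back signed with the server key. A hedged client-side sketch of that round trip; the package clause and import aliases are illustrative assumptions, while SignMessage and the HealthCheck helper are the ones defined elsewhere in this diff:

package example

import (
	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
	ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// healthCheck signs a HealthCheck request and sends it through the
// rpc.go helper defined earlier in this diff.
func healthCheck(cli *client.Client, key *keys.PrivateKey) (control.HealthStatus, error) {
	req := new(control.HealthCheckRequest)
	req.SetBody(new(control.HealthCheckRequest_Body))

	// SignMessage comes from server/sign.go below; the server accepts
	// the request only if this key is in its white list.
	if err := ircontrolsrv.SignMessage(&key.PrivateKey, req); err != nil {
		return control.HealthStatus_HEALTH_STATUS_UNDEFINED, err
	}

	resp, err := control.HealthCheck(cli, req)
	if err != nil {
		return control.HealthStatus_HEALTH_STATUS_UNDEFINED, err
	}
	return resp.GetBody().GetHealthStatus(), nil
}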
-// RemoveNode forces a node removal.
-//
-// If the request is not signed with a key from the white list, a permission error is returned.
-func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- resp := new(control.RemoveNodeResponse)
- resp.SetBody(new(control.RemoveNodeResponse_Body))
-
- nm, err := s.netmapClient.NetMap(ctx)
- if err != nil {
- return nil, fmt.Errorf("getting netmap: %w", err)
- }
- var nodeInfo netmap.NodeInfo
- for _, info := range nm.Nodes() {
- if bytes.Equal(info.PublicKey(), req.GetBody().GetKey()) {
- nodeInfo = info
- break
- }
- }
- if len(nodeInfo.PublicKey()) == 0 {
- return nil, status.Error(codes.NotFound, "no such node")
- }
- if nodeInfo.Status().IsOffline() {
- return nil, status.Error(codes.FailedPrecondition, "node is already offline")
- }
-
- vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
- if err != nil {
- return nil, fmt.Errorf("forcing node removal: %w", err)
- }
- resp.Body.Vub = vub
-
- if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
-
-// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- if len(req.GetBody().GetContainerId()) > 0 && len(req.GetBody().GetOwner()) > 0 {
- return nil, status.Error(codes.InvalidArgument, "specifying both the owner and the container at the same time is not allowed")
- }
- var vub uint32
- if len(req.GetBody().GetContainerId()) > 0 {
- var containerID cid.ID
- if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
- }
- var err error
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
- if err != nil {
- return nil, err
- }
- } else {
- var ownerID refs.OwnerID
- if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to parse ownerID: "+err.Error())
- }
- var owner user.ID
- if err := owner.ReadFromV2(ownerID); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
- }
-
- cids, err := s.containerClient.ContainersOf(ctx, &owner)
- if err != nil {
- return nil, fmt.Errorf("failed to get owner's containers: %w", err)
- }
-
- for _, containerID := range cids {
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
- if err != nil {
- return nil, err
- }
- }
- }
-
- resp := &control.RemoveContainerResponse{
- Body: &control.RemoveContainerResponse_Body{
- Vub: vub,
- },
- }
- if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
- var prm container.DeletePrm
- prm.SetCID(containerID[:])
- prm.SetControlTX(true)
- prm.SetVUB(vub)
-
- vub, err := s.containerClient.Delete(ctx, prm)
- if err != nil {
- return 0, fmt.Errorf("forcing container removal: %w", err)
- }
- return vub, nil
-}
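RemoveContainer accepts either a raw container ID or an owner, never both; given an owner, it enumerates that owner's containers via ContainersOf and removes them one by one. A hedged sketch of the single-container client call, reusing the import aliases from the HealthCheck sketch above plus cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"; the Set* names on the request body are assumed to follow the generated setter pattern seen elsewhere in this diff:

// removeContainerByID builds, signs, and sends a single-container removal
// through the RemoveContainer helper from rpc.go above.
func removeContainerByID(cli *client.Client, key *keys.PrivateKey, id cid.ID) (*control.RemoveContainerResponse, error) {
	body := new(control.RemoveContainerRequest_Body)
	body.SetContainerId(id[:]) // cid.ID is an array type; compare containerID[:] in removeContainer above

	req := new(control.RemoveContainerRequest)
	req.SetBody(body)

	if err := ircontrolsrv.SignMessage(&key.PrivateKey, req); err != nil {
		return nil, err
	}
	return control.RemoveContainer(cli, req)
}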
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
deleted file mode 100644
index 9d5cfefc8..000000000
--- a/pkg/services/control/ir/server/deps.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package control
-
-import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-
-// HealthChecker is a component interface for calculating
-// the current health status of a node.
-type HealthChecker interface {
- // HealthStatus must calculate and return the current health status of the IR application.
- //
- // If the status cannot be calculated for any reason,
- // control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
- HealthStatus() control.HealthStatus
-}
diff --git a/pkg/services/control/ir/server/opts.go b/pkg/services/control/ir/server/opts.go
deleted file mode 100644
index 40bdccb67..000000000
--- a/pkg/services/control/ir/server/opts.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package control
-
-// Option specifies Server's optional parameter.
-type Option func(*options)
-
-type options struct {
- allowedKeys [][]byte
-}
-
-func defaultOptions() *options {
- return new(options)
-}
-
-// WithAllowedKeys returns an option to add public keys
-// to the white list of the Control service.
-func WithAllowedKeys(keys [][]byte) Option {
- return func(o *options) {
- o.allowedKeys = append(o.allowedKeys, keys...)
- }
-}
diff --git a/pkg/services/control/ir/server/prm.go b/pkg/services/control/ir/server/prm.go
deleted file mode 100644
index b4f1f2517..000000000
--- a/pkg/services/control/ir/server/prm.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package control
-
-import (
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// Prm groups required parameters of
-// Server's constructor.
-type Prm struct {
- key keys.PrivateKey
-
- healthChecker HealthChecker
-}
-
-// SetPrivateKey sets the private key used to sign responses.
-func (x *Prm) SetPrivateKey(key keys.PrivateKey) {
- x.key = key
-}
-
-// SetHealthChecker sets the HealthChecker used to calculate
-// the health status.
-func (x *Prm) SetHealthChecker(hc HealthChecker) {
- x.healthChecker = hc
-}
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
deleted file mode 100644
index 0cfca71c1..000000000
--- a/pkg/services/control/ir/server/server.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package control
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
-)
-
-// Server is an entity that serves
-// the Control service on an IR node.
-//
-// To gain access to the service, any request must be
-// signed with a key from the white list.
-type Server struct {
- prm Prm
- netmapClient *netmap.Client
- containerClient *container.Client
- allowedKeys [][]byte
-}
-
-func panicOnPrmValue(n string, v any) {
- const invalidPrmValFmt = "invalid %s parameter (%T): %v"
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Server.
-//
-// Panics if:
-// - parameterized private key is nil;
-// - parameterized HealthChecker is nil.
-//
-// Forms the white list from all keys specified via
-// the WithAllowedKeys option and the public key of
-// the parameterized private key.
-func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { - // verify required parameters - if prm.healthChecker == nil { - panicOnPrmValue("health checker", prm.healthChecker) - } - - // compute optional parameters - o := defaultOptions() - - for _, opt := range opts { - opt(o) - } - - return &Server{ - prm: prm, - netmapClient: netmapClient, - containerClient: containerClient, - - allowedKeys: append(o.allowedKeys, prm.key.PublicKey().Bytes()), - } -} diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go deleted file mode 100644 index d39f6d5f9..000000000 --- a/pkg/services/control/ir/server/sign.go +++ /dev/null @@ -1,97 +0,0 @@ -package control - -import ( - "bytes" - "crypto/ecdsa" - "errors" - "fmt" - - control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" -) - -// SignedMessage is an interface of Control service message. -type SignedMessage interface { - ReadSignedData([]byte) ([]byte, error) - GetSignature() *control.Signature - SetSignature(*control.Signature) -} - -var errDisallowedKey = errors.New("key is not in the allowed list") - -func (s *Server) isValidRequest(req SignedMessage) error { - sign := req.GetSignature() - if sign == nil { - // TODO(@cthulhu-rider): #468 use "const" error - return errors.New("missing signature") - } - - var ( - key = sign.GetKey() - allowed = false - ) - - // check if key is allowed - for i := range s.allowedKeys { - if allowed = bytes.Equal(s.allowedKeys[i], key); allowed { - break - } - } - - if !allowed { - return errDisallowedKey - } - - // verify signature - binBody, err := req.ReadSignedData(nil) - if err != nil { - return fmt.Errorf("marshal request body: %w", err) - } - - // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(sign.GetKey()) - sigV2.SetSign(sign.GetSign()) - sigV2.SetScheme(refs.ECDSA_SHA512) - - var sig frostfscrypto.Signature - if err := sig.ReadFromV2(sigV2); err != nil { - return fmt.Errorf("can't read signature: %w", err) - } - - if !sig.Verify(binBody) { - // TODO(@cthulhu-rider): #468 use "const" error - return errors.New("invalid signature") - } - - return nil -} - -// SignMessage signs Control service message with private key. 
-func SignMessage(key *ecdsa.PrivateKey, msg SignedMessage) error { - binBody, err := msg.ReadSignedData(nil) - if err != nil { - return fmt.Errorf("marshal request body: %w", err) - } - - var sig frostfscrypto.Signature - - err = sig.Calculate(frostfsecdsa.Signer(*key), binBody) - if err != nil { - return fmt.Errorf("calculate signature: %w", err) - } - - // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion - var sigV2 refs.Signature - sig.WriteToV2(&sigV2) - - var sigControl control.Signature - sigControl.SetKey(sigV2.GetKey()) - sigControl.SetSign(sigV2.GetSign()) - - msg.SetSignature(&sigControl) - - return nil -} diff --git a/pkg/services/control/ir/service.proto b/pkg/services/control/ir/service.proto deleted file mode 100644 index fa58db568..000000000 --- a/pkg/services/control/ir/service.proto +++ /dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; - -package ircontrol; - -import "pkg/services/control/ir/types.proto"; - -option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/control"; - -// `ControlService` provides an interface for internal work with the Inner Ring -// node. -service ControlService { - // Performs health check of the IR node. - rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse); - // Forces a new epoch to be signaled by the IR node with high probability. - rpc TickEpoch(TickEpochRequest) returns (TickEpochResponse); - // Forces a node removal to be signaled by the IR node with high probability. - rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse); - // Forces a container removal to be signaled by the IR node with high - // probability. - rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse); -} - -// Health check request. -message HealthCheckRequest { - // Health check request body. - message Body {} - - // Body of health check request message. - Body body = 1; - - // Body signature. - // Should be signed by node key or one of - // the keys configured by the node. - Signature signature = 2; -} - -// Health check response. -message HealthCheckResponse { - // Health check response body - message Body { - // Health status of IR node application. - HealthStatus health_status = 1; - } - - // Body of health check response message. - Body body = 1; - - // Body signature. - Signature signature = 2; -} - -message TickEpochRequest { - message Body { - // Valid until block value override. - uint32 vub = 1; - } - - Body body = 1; - Signature signature = 2; -} - -message TickEpochResponse { - message Body { - // Valid until block value for transaction. - uint32 vub = 1; - } - - Body body = 1; - Signature signature = 2; -} - -message RemoveNodeRequest { - message Body { - bytes key = 1; - // Valid until block value override. - uint32 vub = 2; - } - - Body body = 1; - Signature signature = 2; -} - -message RemoveNodeResponse { - message Body { - // Valid until block value for transaction. - uint32 vub = 1; - } - - Body body = 1; - Signature signature = 2; -} - -message RemoveContainerRequest { - message Body { - bytes container_id = 1; - bytes owner = 2; - // Valid until block value override. - uint32 vub = 3; - } - - Body body = 1; - Signature signature = 2; -} - -message RemoveContainerResponse { - message Body { - // Valid until block value for transaction. 
- uint32 vub = 1; - } - - Body body = 1; - Signature signature = 2; -} diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go deleted file mode 100644 index d27746263..000000000 --- a/pkg/services/control/ir/service_frostfs.pb.go +++ /dev/null @@ -1,2838 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. - -package control - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - strconv "strconv" -) - -type HealthCheckRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil) - _ json.Marshaler = (*HealthCheckRequest_Body)(nil) - _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckRequest struct { - Body *HealthCheckRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil) - _ json.Marshaler = (*HealthCheckRequest)(nil) - _ json.Unmarshaler = (*HealthCheckRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthCheckRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthCheckRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { - x.Body = v -} -func (x *HealthCheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthCheckRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthCheckRequest_Body - f = new(HealthCheckRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckResponse_Body struct { - HealthStatus HealthStatus `json:"healthStatus"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil) - _ json.Marshaler = (*HealthCheckResponse_Body)(nil) - _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.HealthStatus)) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.HealthStatus) != 0 { - mm.AppendInt32(1, int32(x.HealthStatus)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body") - } - switch fc.FieldNum { - case 1: // HealthStatus - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "HealthStatus") - } - x.HealthStatus = HealthStatus(data) - } - } - return nil -} -func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus { - if x != nil { - return x.HealthStatus - } - return 0 -} -func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { - x.HealthStatus = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"healthStatus\":" - out.RawString(prefix) - v := int32(x.HealthStatus) - if vv, ok := HealthStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "healthStatus": - { - var f HealthStatus - var parsedValue HealthStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := HealthStatus_value[v]; ok { - parsedValue = HealthStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = HealthStatus(vv) - case float64: - parsedValue = HealthStatus(v) - } - f = parsedValue - x.HealthStatus = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckResponse struct { - Body *HealthCheckResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil) - _ json.Marshaler = (*HealthCheckResponse)(nil) - _ json.Unmarshaler = (*HealthCheckResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *HealthCheckResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthCheckResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthCheckResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { - x.Body = v -} -func (x *HealthCheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthCheckResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthCheckResponse_Body - f = new(HealthCheckResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TickEpochRequest_Body struct { - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*TickEpochRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*TickEpochRequest_Body)(nil) - _ json.Marshaler = (*TickEpochRequest_Body)(nil) - _ json.Unmarshaler = (*TickEpochRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TickEpochRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt32Size(1, x.Vub) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *TickEpochRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TickEpochRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Vub != 0 { - mm.AppendUint32(1, x.Vub) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *TickEpochRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TickEpochRequest_Body") - } - switch fc.FieldNum { - case 1: // Vub - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Vub") - } - x.Vub = data - } - } - return nil -} -func (x *TickEpochRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} -func (x *TickEpochRequest_Body) SetVub(v uint32) { - x.Vub = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *TickEpochRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"vub\":" - out.RawString(prefix) - out.Uint32(x.Vub) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TickEpochRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "vub": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.Vub = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TickEpochRequest struct { - Body *TickEpochRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*TickEpochRequest)(nil) - _ encoding.ProtoUnmarshaler = (*TickEpochRequest)(nil) - _ json.Marshaler = (*TickEpochRequest)(nil) - _ json.Unmarshaler = (*TickEpochRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TickEpochRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *TickEpochRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *TickEpochRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *TickEpochRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *TickEpochRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TickEpochRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(TickEpochRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) { - x.Body = v -} -func (x *TickEpochRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *TickEpochRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *TickEpochRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TickEpochRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TickEpochRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *TickEpochRequest_Body - f = new(TickEpochRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TickEpochResponse_Body struct { - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*TickEpochResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*TickEpochResponse_Body)(nil) - _ json.Marshaler = (*TickEpochResponse_Body)(nil) - _ json.Unmarshaler = (*TickEpochResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TickEpochResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt32Size(1, x.Vub) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *TickEpochResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TickEpochResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Vub != 0 { - mm.AppendUint32(1, x.Vub) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *TickEpochResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TickEpochResponse_Body") - } - switch fc.FieldNum { - case 1: // Vub - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Vub") - } - x.Vub = data - } - } - return nil -} -func (x *TickEpochResponse_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} -func (x *TickEpochResponse_Body) SetVub(v uint32) { - x.Vub = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *TickEpochResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"vub\":" - out.RawString(prefix) - out.Uint32(x.Vub) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TickEpochResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "vub": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.Vub = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TickEpochResponse struct { - Body *TickEpochResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*TickEpochResponse)(nil) - _ encoding.ProtoUnmarshaler = (*TickEpochResponse)(nil) - _ json.Marshaler = (*TickEpochResponse)(nil) - _ json.Unmarshaler = (*TickEpochResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TickEpochResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. 
-func (x *TickEpochResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *TickEpochResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *TickEpochResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *TickEpochResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TickEpochResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(TickEpochResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) { - x.Body = v -} -func (x *TickEpochResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *TickEpochResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *TickEpochResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *TickEpochResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TickEpochResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *TickEpochResponse_Body - f = new(TickEpochResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveNodeRequest_Body struct { - Key []byte `json:"key"` - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveNodeRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest_Body)(nil) - _ json.Marshaler = (*RemoveNodeRequest_Body)(nil) - _ json.Unmarshaler = (*RemoveNodeRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveNodeRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Key) - size += proto.UInt32Size(2, x.Vub) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveNodeRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveNodeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendBytes(1, x.Key) - } - if x.Vub != 0 { - mm.AppendUint32(2, x.Vub) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveNodeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest_Body") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Vub - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Vub") - } - x.Vub = data - } - } - return nil -} -func (x *RemoveNodeRequest_Body) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} -func (x *RemoveNodeRequest_Body) SetKey(v []byte) { - x.Key = v -} -func (x *RemoveNodeRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} -func (x *RemoveNodeRequest_Body) SetVub(v uint32) { - x.Vub = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *RemoveNodeRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"key\":"
-		out.RawString(prefix)
-		if x.Key != nil {
-			out.Base64Bytes(x.Key)
-		} else {
-			out.String("")
-		}
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"vub\":"
-		out.RawString(prefix)
-		out.Uint32(x.Vub)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveNodeRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "key":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.Key = f
-			}
-		case "vub":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.Vub = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveNodeRequest struct {
-	Body      *RemoveNodeRequest_Body `json:"body"`
-	Signature *Signature              `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveNodeRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveNodeRequest)(nil)
-	_ json.Marshaler            = (*RemoveNodeRequest)(nil)
-	_ json.Unmarshaler          = (*RemoveNodeRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveNodeRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveNodeRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveNodeRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveNodeRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveNodeRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) { - x.Body = v -} -func (x *RemoveNodeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveNodeRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveNodeRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *RemoveNodeRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveNodeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveNodeRequest_Body - f = new(RemoveNodeRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveNodeResponse_Body struct { - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveNodeResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse_Body)(nil) - _ json.Marshaler = (*RemoveNodeResponse_Body)(nil) - _ json.Unmarshaler = (*RemoveNodeResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveNodeResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt32Size(1, x.Vub) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveNodeResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveNodeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Vub != 0 { - mm.AppendUint32(1, x.Vub) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveNodeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse_Body") - } - switch fc.FieldNum { - case 1: // Vub - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Vub") - } - x.Vub = data - } - } - return nil -} -func (x *RemoveNodeResponse_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} -func (x *RemoveNodeResponse_Body) SetVub(v uint32) { - x.Vub = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveNodeResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"vub\":" - out.RawString(prefix) - out.Uint32(x.Vub) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
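// A short sketch showing that the easyjson-backed MarshalJSON/UnmarshalJSON
// methods above interoperate with the standard library: byte fields are
// emitted as base64 strings and "vub" as a JSON number, e.g.
// {"key":"AQID","vub":42}. The helper name is illustrative.
package example

import (
	"encoding/json"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
)

func jsonRoundTrip() error {
	in := new(control.RemoveNodeRequest_Body)
	in.SetKey([]byte{1, 2, 3})
	in.SetVub(42)

	blob, err := json.Marshal(in) // dispatches to the generated MarshalJSON
	if err != nil {
		return err
	}

	out := new(control.RemoveNodeRequest_Body)
	return json.Unmarshal(blob, out) // dispatches to the generated UnmarshalJSON
}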
-func (x *RemoveNodeResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "vub":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.Vub = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveNodeResponse struct {
-	Body      *RemoveNodeResponse_Body `json:"body"`
-	Signature *Signature               `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveNodeResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveNodeResponse)(nil)
-	_ json.Marshaler            = (*RemoveNodeResponse)(nil)
-	_ json.Unmarshaler          = (*RemoveNodeResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveNodeResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveNodeResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveNodeResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveNodeResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveNodeResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) { - x.Body = v -} -func (x *RemoveNodeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveNodeResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveNodeResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *RemoveNodeResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveNodeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveNodeResponse_Body - f = new(RemoveNodeResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveContainerRequest_Body struct { - ContainerId []byte `json:"containerId"` - Owner []byte `json:"owner"` - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveContainerRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest_Body)(nil) - _ json.Marshaler = (*RemoveContainerRequest_Body)(nil) - _ json.Unmarshaler = (*RemoveContainerRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *RemoveContainerRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.BytesSize(2, x.Owner) - size += proto.UInt32Size(3, x.Vub) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveContainerRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveContainerRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.Owner) != 0 { - mm.AppendBytes(2, x.Owner) - } - if x.Vub != 0 { - mm.AppendUint32(3, x.Vub) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveContainerRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // Owner - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Owner") - } - x.Owner = data - case 3: // Vub - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Vub") - } - x.Vub = data - } - } - return nil -} -func (x *RemoveContainerRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *RemoveContainerRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *RemoveContainerRequest_Body) GetOwner() []byte { - if x != nil { - return x.Owner - } - return nil -} -func (x *RemoveContainerRequest_Body) SetOwner(v []byte) { - x.Owner = v -} -func (x *RemoveContainerRequest_Body) GetVub() uint32 { - if x != nil { - return x.Vub - } - return 0 -} -func (x *RemoveContainerRequest_Body) SetVub(v uint32) { - x.Vub = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveContainerRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"owner\":" - out.RawString(prefix) - if x.Owner != nil { - out.Base64Bytes(x.Owner) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"vub\":" - out.RawString(prefix) - out.Uint32(x.Vub) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
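// A tiny sketch of the nil-receiver convention used by all getters generated
// above: request plumbing can read fields without guarding against a missing
// body. The helper name is illustrative.
package example

import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"

func vubOf(body *control.RemoveContainerRequest_Body) uint32 {
	// Safe even when body == nil: GetVub on a nil receiver returns 0,
	// just as GetContainerId and GetOwner return nil.
	return body.GetVub()
}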
-func (x *RemoveContainerRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "containerId":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.ContainerId = f
-			}
-		case "owner":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.Owner = f
-			}
-		case "vub":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.Vub = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveContainerRequest struct {
-	Body      *RemoveContainerRequest_Body `json:"body"`
-	Signature *Signature                   `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveContainerRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveContainerRequest)(nil)
-	_ json.Marshaler            = (*RemoveContainerRequest)(nil)
-	_ json.Unmarshaler          = (*RemoveContainerRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveContainerRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveContainerRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveContainerRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveContainerRequest) SetBody(v *RemoveContainerRequest_Body) { - x.Body = v -} -func (x *RemoveContainerRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveContainerRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveContainerRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *RemoveContainerRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveContainerRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveContainerRequest_Body - f = new(RemoveContainerRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveContainerResponse_Body struct { - Vub uint32 `json:"vub"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveContainerResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse_Body)(nil) - _ json.Marshaler = (*RemoveContainerResponse_Body)(nil) - _ json.Unmarshaler = (*RemoveContainerResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *RemoveContainerResponse_Body) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.UInt32Size(1, x.Vub)
-	return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerResponse_Body) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveContainerResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Vub != 0 {
-		mm.AppendUint32(1, x.Vub)
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse_Body")
-		}
-		switch fc.FieldNum {
-		case 1: // Vub
-			data, ok := fc.Uint32()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Vub")
-			}
-			x.Vub = data
-		}
-	}
-	return nil
-}
-func (x *RemoveContainerResponse_Body) GetVub() uint32 {
-	if x != nil {
-		return x.Vub
-	}
-	return 0
-}
-func (x *RemoveContainerResponse_Body) SetVub(v uint32) {
-	x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"vub\":"
-		out.RawString(prefix)
-		out.Uint32(x.Vub)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "vub":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.Vub = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveContainerResponse struct {
-	Body      *RemoveContainerResponse_Body `json:"body"`
-	Signature *Signature                    `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveContainerResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveContainerResponse)(nil)
-	_ json.Marshaler            = (*RemoveContainerResponse)(nil)
-	_ json.Unmarshaler          = (*RemoveContainerResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveContainerResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveContainerResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerResponse) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(RemoveContainerResponse_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *RemoveContainerResponse) SetBody(v *RemoveContainerResponse_Body) {
-	x.Body = v
-}
-func (x *RemoveContainerResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *RemoveContainerResponse) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerResponse) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveContainerResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveContainerResponse_Body - f = new(RemoveContainerResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go deleted file mode 100644 index 336bf5f70..000000000 --- a/pkg/services/control/ir/service_grpc.pb.go +++ /dev/null @@ -1,228 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.0 -// source: pkg/services/control/ir/service.proto - -package control - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - ControlService_HealthCheck_FullMethodName = "/ircontrol.ControlService/HealthCheck" - ControlService_TickEpoch_FullMethodName = "/ircontrol.ControlService/TickEpoch" - ControlService_RemoveNode_FullMethodName = "/ircontrol.ControlService/RemoveNode" - ControlService_RemoveContainer_FullMethodName = "/ircontrol.ControlService/RemoveContainer" -) - -// ControlServiceClient is the client API for ControlService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ControlServiceClient interface { - // Performs health check of the IR node. - HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Forces a new epoch to be signaled by the IR node with high probability. - TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error) - // Forces a node removal to be signaled by the IR node with high probability. - RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) - // Forces a container removal to be signaled by the IR node with high - // probability. - RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) -} - -type controlServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient { - return &controlServiceClient{cc} -} - -func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error) { - out := new(TickEpochResponse) - err := c.cc.Invoke(ctx, ControlService_TickEpoch_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) { - out := new(RemoveNodeResponse) - err := c.cc.Invoke(ctx, ControlService_RemoveNode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) { - out := new(RemoveContainerResponse) - err := c.cc.Invoke(ctx, ControlService_RemoveContainer_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ControlServiceServer is the server API for ControlService service. -// All implementations should embed UnimplementedControlServiceServer -// for forward compatibility -type ControlServiceServer interface { - // Performs health check of the IR node. - HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Forces a new epoch to be signaled by the IR node with high probability. - TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error) - // Forces a node removal to be signaled by the IR node with high probability. - RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) - // Forces a container removal to be signaled by the IR node with high - // probability. - RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) -} - -// UnimplementedControlServiceServer should be embedded to have forward compatible implementations. -type UnimplementedControlServiceServer struct { -} - -func (UnimplementedControlServiceServer) HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented") -} -func (UnimplementedControlServiceServer) TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TickEpoch not implemented") -} -func (UnimplementedControlServiceServer) RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveNode not implemented") -} -func (UnimplementedControlServiceServer) RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveContainer not implemented") -} - -// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ControlServiceServer will -// result in compilation errors. 
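// A minimal sketch of a server embedding UnimplementedControlServiceServer,
// as the generated comments above recommend for forward compatibility:
// methods that are not overridden return codes.Unimplemented, so adding new
// RPCs to the service does not break this implementation. Names and the
// empty response body are illustrative.
package example

import (
	"context"
	"net"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
	"google.golang.org/grpc"
)

type irServer struct {
	control.UnimplementedControlServiceServer
}

func (s *irServer) HealthCheck(ctx context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
	resp := new(control.HealthCheckResponse)
	// Filling the body with the node's HealthStatus and signing the
	// response are omitted in this sketch.
	return resp, nil
}

func serve(addr string) error {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	srv := grpc.NewServer()
	control.RegisterControlServiceServer(srv, &irServer{})
	return srv.Serve(lis)
}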
-type UnsafeControlServiceServer interface { - mustEmbedUnimplementedControlServiceServer() -} - -func RegisterControlServiceServer(s grpc.ServiceRegistrar, srv ControlServiceServer) { - s.RegisterService(&ControlService_ServiceDesc, srv) -} - -func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).HealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_HealthCheck_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_TickEpoch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TickEpochRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).TickEpoch(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_TickEpoch_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).TickEpoch(ctx, req.(*TickEpochRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).RemoveNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_RemoveNode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).RemoveNode(ctx, req.(*RemoveNodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).RemoveContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_RemoveContainer_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).RemoveContainer(ctx, req.(*RemoveContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. 
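// A companion sketch for the client side: dialing the control endpoint and
// invoking TickEpoch through the generated client. The insecure transport
// credentials are purely illustrative; a real control channel would be
// authenticated, and how FrostFS configures it is not shown in this diff.
package example

import (
	"context"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func tickEpoch(ctx context.Context, endpoint string) (*control.TickEpochResponse, error) {
	conn, err := grpc.Dial(endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	cli := control.NewControlServiceClient(conn)
	return cli.TickEpoch(ctx, new(control.TickEpochRequest))
}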
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ControlService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ircontrol.ControlService", - HandlerType: (*ControlServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "HealthCheck", - Handler: _ControlService_HealthCheck_Handler, - }, - { - MethodName: "TickEpoch", - Handler: _ControlService_TickEpoch_Handler, - }, - { - MethodName: "RemoveNode", - Handler: _ControlService_RemoveNode_Handler, - }, - { - MethodName: "RemoveContainer", - Handler: _ControlService_RemoveContainer_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "pkg/services/control/ir/service.proto", -} diff --git a/pkg/services/control/ir/types.proto b/pkg/services/control/ir/types.proto deleted file mode 100644 index 901a55918..000000000 --- a/pkg/services/control/ir/types.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package ircontrol; - -option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/control"; - -// Signature of some message. -message Signature { - // Public key used for signing. - bytes key = 1 [ json_name = "key" ]; - - // Binary signature. - bytes sign = 2 [ json_name = "signature" ]; -} - -// Health status of the IR application. -enum HealthStatus { - // Undefined status, default value. - HEALTH_STATUS_UNDEFINED = 0; - - // IR application is starting. - STARTING = 1; - - // IR application is started and serves all services. - READY = 2; - - // IR application is shutting down. - SHUTTING_DOWN = 3; - - // IR application is reconfiguring. - RECONFIGURING = 4; -} diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go deleted file mode 100644 index 407eec6ad..000000000 --- a/pkg/services/control/ir/types_frostfs.pb.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. 
- -package control - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - strconv "strconv" -) - -type HealthStatus int32 - -const ( - HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 - HealthStatus_STARTING HealthStatus = 1 - HealthStatus_READY HealthStatus = 2 - HealthStatus_SHUTTING_DOWN HealthStatus = 3 - HealthStatus_RECONFIGURING HealthStatus = 4 -) - -var ( - HealthStatus_name = map[int32]string{ - 0: "HEALTH_STATUS_UNDEFINED", - 1: "STARTING", - 2: "READY", - 3: "SHUTTING_DOWN", - 4: "RECONFIGURING", - } - HealthStatus_value = map[string]int32{ - "HEALTH_STATUS_UNDEFINED": 0, - "STARTING": 1, - "READY": 2, - "SHUTTING_DOWN": 3, - "RECONFIGURING": 4, - } -) - -func (x HealthStatus) String() string { - if v, ok := HealthStatus_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *HealthStatus) FromString(s string) bool { - if v, ok := HealthStatus_value[s]; ok { - *x = HealthStatus(v) - return true - } - return false -} - -type Signature struct { - Key []byte `json:"key"` - Sign []byte `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*Signature)(nil) - _ encoding.ProtoUnmarshaler = (*Signature)(nil) - _ json.Marshaler = (*Signature)(nil) - _ json.Unmarshaler = (*Signature)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *Signature) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Key) - size += proto.BytesSize(2, x.Sign) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *Signature) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendBytes(1, x.Key) - } - if len(x.Sign) != 0 { - mm.AppendBytes(2, x.Sign) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "Signature") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Sign - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Sign") - } - x.Sign = data - } - } - return nil -} -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} -func (x *Signature) SetKey(v []byte) { - x.Key = v -} -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} -func (x *Signature) SetSign(v []byte) { - x.Sign = v -} - -// MarshalJSON implements the json.Marshaler interface. 
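// A short sketch of the HealthStatus conversions defined above: FromString
// reports whether the name was recognized, and String falls back to the
// decimal value for unknown codes. The helper name is illustrative.
package example

import (
	"fmt"

	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
)

func parseStatus(name string) (control.HealthStatus, error) {
	var st control.HealthStatus
	if !st.FromString(name) {
		return 0, fmt.Errorf("unknown health status %q", name)
	}
	return st, nil
}

// parseStatus("READY") yields HealthStatus_READY (2),
// while HealthStatus(99).String() yields "99".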
-func (x *Signature) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"key\":" - out.RawString(prefix) - if x.Key != nil { - out.Base64Bytes(x.Key) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - if x.Sign != nil { - out.Base64Bytes(x.Sign) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *Signature) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "key": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Key = f - } - case "signature": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Sign = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go deleted file mode 100644 index 0c4236d0e..000000000 --- a/pkg/services/control/rpc.go +++ /dev/null @@ -1,386 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" -) - -const serviceName = "control.ControlService" - -const ( - rpcHealthCheck = "HealthCheck" - rpcSetNetmapStatus = "SetNetmapStatus" - rpcGetNetmapStatus = "GetNetmapStatus" - rpcDropObjects = "DropObjects" - rpcListShards = "ListShards" - rpcSetShardMode = "SetShardMode" - rpcSynchronizeTree = "SynchronizeTree" - rpcStartShardEvacuation = "StartShardEvacuation" - rpcGetShardEvacuationStatus = "GetShardEvacuationStatus" - rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus" - rpcStopShardEvacuation = "StopShardEvacuation" - rpcFlushCache = "FlushCache" - rpcDoctor = "Doctor" - rpcAddChainLocalOverride = "AddChainLocalOverride" - rpcGetChainLocalOverride = "GetChainLocalOverride" - rpcListChainLocalOverrides = "ListChainLocalOverrides" - rpcRemoveChainLocalOverride = "RemoveChainLocalOverride" - rpcRemoveChainLocalOverridesByTarget = "RemoveChainLocalOverridesByTarget" - rpcSealWriteCache = "SealWriteCache" - rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides" - rpcDetachShards = "DetachShards" - rpcStartShardRebuild = "StartShardRebuild" - rpcListShardsForObject = "ListShardsForObject" -) - -// HealthCheck executes ControlService.HealthCheck RPC. -func HealthCheck( - cli *client.Client, - req *HealthCheckRequest, - opts ...client.CallOption, -) (*HealthCheckResponse, error) { - wResp := newResponseWrapper[HealthCheckResponse]() - wReq := &requestWrapper{ - m: req, - } - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcHealthCheck), wReq, wResp, opts...) 
- if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// SetNetmapStatus executes ControlService.SetNetmapStatus RPC. -func SetNetmapStatus( - cli *client.Client, - req *SetNetmapStatusRequest, - opts ...client.CallOption, -) (*SetNetmapStatusResponse, error) { - wResp := newResponseWrapper[SetNetmapStatusResponse]() - - wReq := &requestWrapper{ - m: req, - } - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSetNetmapStatus), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// GetNetmapStatus executes ControlService.GetNetmapStatus RPC. -func GetNetmapStatus( - _ context.Context, - cli *client.Client, - req *GetNetmapStatusRequest, - opts ...client.CallOption, -) (*GetNetmapStatusResponse, error) { - wResp := newResponseWrapper[GetNetmapStatusResponse]() - - wReq := &requestWrapper{ - m: req, - } - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetNetmapStatus), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// DropObjects executes ControlService.DropObjects RPC. -func DropObjects( - cli *client.Client, - req *DropObjectsRequest, - opts ...client.CallOption, -) (*DropObjectsResponse, error) { - wResp := newResponseWrapper[DropObjectsResponse]() - - wReq := &requestWrapper{ - m: req, - } - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDropObjects), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// ListShards executes ControlService.ListShards RPC. -func ListShards( - cli *client.Client, - req *ListShardsRequest, - opts ...client.CallOption, -) (*ListShardsResponse, error) { - wResp := newResponseWrapper[ListShardsResponse]() - - wReq := &requestWrapper{ - m: req, - } - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShards), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// SetShardMode executes ControlService.SetShardMode RPC. -func SetShardMode( - cli *client.Client, - req *SetShardModeRequest, - opts ...client.CallOption, -) (*SetShardModeResponse, error) { - wResp := newResponseWrapper[SetShardModeResponse]() - - wReq := &requestWrapper{ - m: req, - } - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSetShardMode), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// SynchronizeTree executes ControlService.SynchronizeTree RPC. -func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...client.CallOption) (*SynchronizeTreeResponse, error) { - wResp := newResponseWrapper[SynchronizeTreeResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSynchronizeTree), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// StartShardEvacuation executes ControlService.StartShardEvacuation RPC. -func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) { - wResp := newResponseWrapper[StartShardEvacuationResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardEvacuation), wReq, wResp, opts...) 
- if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// GetShardEvacuationStatus executes ControlService.GetShardEvacuationStatus RPC. -func GetShardEvacuationStatus(cli *client.Client, req *GetShardEvacuationStatusRequest, opts ...client.CallOption) (*GetShardEvacuationStatusResponse, error) { - wResp := newResponseWrapper[GetShardEvacuationStatusResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetShardEvacuationStatus), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// StopShardEvacuation executes ControlService.StopShardEvacuation RPC. -func StopShardEvacuation(cli *client.Client, req *StopShardEvacuationRequest, opts ...client.CallOption) (*StopShardEvacuationResponse, error) { - wResp := newResponseWrapper[StopShardEvacuationResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStopShardEvacuation), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// ResetShardEvacuationStatus executes ControlService.ResetShardEvacuationStatus RPC. -func ResetShardEvacuationStatus(cli *client.Client, req *ResetShardEvacuationStatusRequest, opts ...client.CallOption) (*ResetShardEvacuationStatusResponse, error) { - wResp := newResponseWrapper[ResetShardEvacuationStatusResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcResetShardEvacuationStatus), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// FlushCache executes ControlService.FlushCache RPC. -func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallOption) (*FlushCacheResponse, error) { - wResp := newResponseWrapper[FlushCacheResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcFlushCache), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// Doctor executes ControlService.Doctor RPC. -func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (*DoctorResponse, error) { - wResp := newResponseWrapper[DoctorResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDoctor), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// AddChainLocalOverride executes ControlService.AddChainLocalOverride RPC. -func AddChainLocalOverride(cli *client.Client, req *AddChainLocalOverrideRequest, opts ...client.CallOption) (*AddChainLocalOverrideResponse, error) { - wResp := newResponseWrapper[AddChainLocalOverrideResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcAddChainLocalOverride), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - -// ListChainLocalOverrides executes ControlService.ListChainLocalOverrides RPC. -func ListChainLocalOverrides(cli *client.Client, req *ListChainLocalOverridesRequest, opts ...client.CallOption) (*ListChainLocalOverridesResponse, error) { - wResp := newResponseWrapper[ListChainLocalOverridesResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListChainLocalOverrides), wReq, wResp, opts...) 
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// ListTargetsLocalOverrides executes ControlService.ListTargetsLocalOverrides RPC.
-func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverridesRequest, opts ...client.CallOption) (*ListTargetsLocalOverridesResponse, error) {
-	wResp := newResponseWrapper[ListTargetsLocalOverridesResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListTargetsLocalOverrides), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// GetChainLocalOverride executes ControlService.GetChainLocalOverride RPC.
-func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
-	wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetChainLocalOverride), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
-func RemoveChainLocalOverride(cli *client.Client, req *RemoveChainLocalOverrideRequest, opts ...client.CallOption) (*RemoveChainLocalOverrideResponse, error) {
-	wResp := newResponseWrapper[RemoveChainLocalOverrideResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverride), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// RemoveChainLocalOverridesByTarget executes ControlService.RemoveChainLocalOverridesByTarget RPC.
-func RemoveChainLocalOverridesByTarget(cli *client.Client, req *RemoveChainLocalOverridesByTargetRequest, opts ...client.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) {
-	wResp := newResponseWrapper[RemoveChainLocalOverridesByTargetResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverridesByTarget), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// SealWriteCache executes ControlService.SealWriteCache RPC.
-func SealWriteCache(cli *client.Client, req *SealWriteCacheRequest, opts ...client.CallOption) (*SealWriteCacheResponse, error) {
-	wResp := newResponseWrapper[SealWriteCacheResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSealWriteCache), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// DetachShards executes ControlService.DetachShards RPC.
-func DetachShards(
-	cli *client.Client,
-	req *DetachShardsRequest,
-	opts ...client.CallOption,
-) (*DetachShardsResponse, error) {
-	wResp := newResponseWrapper[DetachShardsResponse]()
-
-	wReq := &requestWrapper{
-		m: req,
-	}
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDetachShards), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
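Every wrapper in this file follows the same shape: wrap the typed request and response, then let client.SendUnary drive the round trip. For illustration, a caller-side sketch (a hypothetical helper, not part of the API; it assumes an already established client and a request that the caller has filled and signed, and uses only names visible in this file):

	func listShardsOnce(cli *client.Client, req *ListShardsRequest) (*ListShardsResponse, error) {
		// The wrapper supplies the method descriptor and (de)serialization;
		// the caller is left with only the error and the typed response.
		resp, err := ListShards(cli, req)
		if err != nil {
			return nil, err
		}
		return resp, nil
	}

-// StartShardRebuild executes ControlService.StartShardRebuild RPC.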
-func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) {
-	wResp := newResponseWrapper[StartShardRebuildResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
-// ListShardsForObject executes ControlService.ListShardsForObject RPC.
-func ListShardsForObject(
-	cli *client.Client,
-	req *ListShardsForObjectRequest,
-	opts ...client.CallOption,
-) (*ListShardsForObjectResponse, error) {
-	wResp := newResponseWrapper[ListShardsForObjectResponse]()
-
-	wReq := &requestWrapper{
-		m: req,
-	}
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
diff --git a/pkg/services/control/server/ape/validate.go b/pkg/services/control/server/ape/validate.go
deleted file mode 100644
index f4aa0399f..000000000
--- a/pkg/services/control/server/ape/validate.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package ape
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-)
-
-var (
-	ErrInvalidResource    = errors.New("invalid resource name")
-	ErrUnsupportedPrefix  = errors.New("unsupported resource name prefix")
-	ErrInvalidContainerID = errors.New("invalid container id")
-	ErrInvalidObjectID    = errors.New("invalid object id")
-	ErrInvalidNamespace   = fmt.Errorf("namespace must match regexp: %s", ape.NamespaceNameRegexp.String())
-)
-
-// ValidateResourceName validates resource name components - container and object id, namespace.
-// It also validates that the resource name matches the templates of the policy engine's native scheme.
-func ValidateResourceName(name string) error {
-	if after, found := strings.CutPrefix(name, native.ObjectPrefix+"/"); found {
-		return validateObjectResourceName(after)
-	} else if after, found = strings.CutPrefix(name, native.ContainerPrefix+"/"); found {
-		return validateContainerResourceName(after)
-	}
-	return ErrUnsupportedPrefix
-}
-
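For reference, the accepted object resource names checked below reduce to four shapes, where the root namespace is written as an empty segment before the first slash: "*", "<namespace>/*", "<namespace>/<CID>/*", and "<namespace>/<CID>/<OID>".

-// validateObjectResourceName validates a name for an object.
-// Name should be without prefix `native.ObjectPrefix`.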
-func validateObjectResourceName(name string) error {
-	if name == "*" {
-		return nil
-	}
-	lexems := strings.Split(name, "/")
-	if len(lexems) == 1 && lexems[0] == "*" {
-		return nil
-	} else if len(lexems) == 2 {
-		// len == 2 means format `namespace(root_namespace)/*`
-		if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
-			return ErrInvalidNamespace
-		}
-		if lexems[1] == "*" {
-			return nil
-		}
-	} else if len(lexems) == 3 {
-		// len == 3 means format `namespace(root_namespace)/CID/OID(*)`
-		if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
-			return ErrInvalidNamespace
-		}
-		var cnr cid.ID
-		err := cnr.DecodeString(lexems[1])
-		if err != nil {
-			return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
-		}
-		if lexems[2] == "*" {
-			return nil
-		}
-		var objID oid.ID
-		err = objID.DecodeString(lexems[2])
-		if err != nil {
-			return fmt.Errorf("%w: %w", ErrInvalidObjectID, err)
-		}
-		return nil
-	}
-	return ErrInvalidResource
-}
-
-// validateContainerResourceName validates a resource name for a container.
-// Name should be without prefix `native.ContainerPrefix`.
-func validateContainerResourceName(name string) error {
-	if name == "*" {
-		return nil
-	}
-	lexems := strings.Split(name, "/")
-	if len(lexems) == 1 && lexems[0] == "*" {
-		return nil
-	} else if len(lexems) == 2 {
-		// len == 2 means format `namespace(root_namespace)/CID(*)`
-		if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
-			return ErrInvalidNamespace
-		}
-		if lexems[1] != "*" {
-			var cnr cid.ID
-			err := cnr.DecodeString(lexems[1])
-			if err != nil {
-				return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
-			}
-		}
-		return nil
-	}
-	return ErrInvalidResource
-}
diff --git a/pkg/services/control/server/ape/validate_test.go b/pkg/services/control/server/ape/validate_test.go
deleted file mode 100644
index af811efed..000000000
--- a/pkg/services/control/server/ape/validate_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package ape
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-	"github.com/stretchr/testify/require"
-)
-
-func TestValidationOfChainResources(t *testing.T) {
-	tests := [...]struct {
-		testName     string
-		resourceName string
-		expectErr    error
-	}{
-		{
-			testName:     "native object: all objects",
-			resourceName: native.ObjectPrefix + "/*",
-		},
-		{
-			testName:     "native object: all objects in namespace",
-			resourceName: native.ObjectPrefix + "/ns/*",
-		},
-		{
-			testName:     "native object: all objects in root namespace",
-			resourceName: native.ObjectPrefix + "//*",
-		},
-		{
-			testName:     "native object: all objects in namespace/container",
-			resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
-		},
-		{
-			testName:     "native object: all objects in root namespace/container",
-			resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
-		},
-		{
-			testName:     "native object: object in namespace/container",
-			resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
-		},
-		{
-			testName:     "native object: object in root namespace/container",
-			resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
-		},
-		{
-			testName:     "native object: invalid all objects",
-			resourceName: native.ObjectPrefix + "/*12313",
-			expectErr:    ErrInvalidResource,
-		},
-		{
-			testName:     "native object: all objects in invalid namespace",
-			resourceName: 
native.ObjectPrefix + "/qwe_123123/*", - expectErr: ErrInvalidNamespace, - }, - { - testName: "native object: invalid all objects in root namespace", - resourceName: native.ObjectPrefix + "//qwe", - expectErr: ErrInvalidResource, - }, - { - testName: "native object: invalid cid in all objects in root namespace", - resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*", - expectErr: ErrInvalidContainerID, - }, - { - testName: "native object: invalid cid in all objects in namespace", - resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*", - expectErr: ErrInvalidContainerID, - }, - { - testName: "native object: invalid object in namespace/container", - resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY111", - expectErr: ErrInvalidObjectID, - }, - { - testName: "native object: invalid resource", - resourceName: native.ObjectPrefix + "/ns/SeHNpifD/AFkrsuvWY111/AFkrsuvWY222", - expectErr: ErrInvalidResource, - }, - { - testName: "native container: all containers", - resourceName: native.ContainerPrefix + "/*", - }, - { - testName: "native container: all containers in namespace", - resourceName: native.ContainerPrefix + "/ns/*", - }, - { - testName: "native container: all containers in root namespace", - resourceName: native.ContainerPrefix + "//*", - }, - { - testName: "native container: container in namespace", - resourceName: native.ContainerPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH", - }, - { - testName: "native container: container in root namespace", - resourceName: native.ContainerPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH", - }, - { - testName: "native container: invalid all containers", - resourceName: native.ContainerPrefix + "/*asd", - expectErr: ErrInvalidResource, - }, - { - testName: "native container: invalid resource", - resourceName: native.ContainerPrefix + "/ns/cid/cid", - expectErr: ErrInvalidResource, - }, - { - testName: "native container: invalid container in root namespace", - resourceName: native.ContainerPrefix + "//*asd", - expectErr: ErrInvalidContainerID, - }, - { - testName: "native container: container in invalid namespace", - resourceName: native.ContainerPrefix + "/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH", - expectErr: ErrInvalidNamespace, - }, - { - testName: "unsupported prefix", - resourceName: "native:test/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH", - expectErr: ErrUnsupportedPrefix, - }, - } - - for _, test := range tests { - t.Run(test.testName, func(t *testing.T) { - err := ValidateResourceName(test.resourceName) - require.ErrorIs(t, err, test.expectErr) - }) - } -} diff --git a/pkg/services/control/server/convert.go b/pkg/services/control/server/convert.go deleted file mode 100644 index 61d7e41c1..000000000 --- a/pkg/services/control/server/convert.go +++ /dev/null @@ -1,63 +0,0 @@ -package control - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "github.com/mr-tron/base58" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func stateToResponse(state *engine.EvacuationState) (*control.GetShardEvacuationStatusResponse, error) { - shardIDs := make([][]byte, 0, len(state.ShardIDs())) - for _, shID := range state.ShardIDs() { - id, err := base58.Decode(shID) - if err != nil { - return nil, status.Error(codes.Internal, 
"invalid shard id format: "+shID) - } - shardIDs = append(shardIDs, id) - } - var evacStatus control.GetShardEvacuationStatusResponse_Body_Status - switch state.ProcessingStatus() { - case engine.EvacuateProcessStateRunning: - evacStatus = control.GetShardEvacuationStatusResponse_Body_RUNNING - case engine.EvacuateProcessStateCompleted: - evacStatus = control.GetShardEvacuationStatusResponse_Body_COMPLETED - default: - evacStatus = control.GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED - } - var startedAt *control.GetShardEvacuationStatusResponse_Body_UnixTimestamp - if state.StartedAt() != nil { - startedAt = &control.GetShardEvacuationStatusResponse_Body_UnixTimestamp{ - Value: state.StartedAt().Unix(), - } - } - var duration *control.GetShardEvacuationStatusResponse_Body_Duration - if state.StartedAt() != nil { - end := time.Now().UTC() - if state.FinishedAt() != nil { - end = *state.FinishedAt() - } - duration = &control.GetShardEvacuationStatusResponse_Body_Duration{ - Seconds: int64(end.Sub(*state.StartedAt()).Seconds()), - } - } - return &control.GetShardEvacuationStatusResponse{ - Body: &control.GetShardEvacuationStatusResponse_Body{ - Shard_ID: shardIDs, - EvacuatedObjects: state.ObjectsEvacuated(), - TotalObjects: state.ObjectsTotal(), - FailedObjects: state.ObjectsFailed(), - Status: evacStatus, - StartedAt: startedAt, - Duration: duration, - ErrorMessage: state.ErrorMessage(), - SkippedObjects: state.ObjectsSkipped(), - TotalTrees: state.TreesTotal(), - EvacuatedTrees: state.TreesEvacuated(), - FailedTrees: state.TreesFailed(), - }, - }, nil -} diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go deleted file mode 100644 index d9d5c5f5e..000000000 --- a/pkg/services/control/server/ctrlmessage/sign.go +++ /dev/null @@ -1,44 +0,0 @@ -package ctrlmessage - -import ( - "crypto/ecdsa" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" -) - -type SignedMessage interface { - ReadSignedData([]byte) ([]byte, error) - GetSignature() *control.Signature - SetSignature(*control.Signature) -} - -// Sign signs Control service ctrlmessage with private key. 
-func Sign(key *ecdsa.PrivateKey, msg SignedMessage) error { - binBody, err := msg.ReadSignedData(nil) - if err != nil { - return fmt.Errorf("marshal request body: %w", err) - } - - var sig frostfscrypto.Signature - - err = sig.Calculate(frostfsecdsa.Signer(*key), binBody) - if err != nil { - return fmt.Errorf("calculate signature: %w", err) - } - - // TODO(@cthulhu-rider): #468 use Signature ctrlmessage from FrostFS API to avoid conversion - var sigV2 refs.Signature - sig.WriteToV2(&sigV2) - - var sigControl control.Signature - sigControl.SetKey(sigV2.GetKey()) - sigControl.SetSign(sigV2.GetSign()) - - msg.SetSignature(&sigControl) - - return nil -} diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go deleted file mode 100644 index ffd36962b..000000000 --- a/pkg/services/control/server/detach_shards.go +++ /dev/null @@ -1,38 +0,0 @@ -package control - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - shardIDs := s.getShardIDList(req.GetBody().GetShard_ID()) - - if err := s.s.DetachShards(ctx, shardIDs); err != nil { - if errors.As(err, new(logicerr.Logical)) { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.DetachShardsResponse{ - Body: &control.DetachShardsResponse_Body{}, - } - - if err = ctrlmessage.Sign(s.key, resp); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/pkg/services/control/server/doctor.go b/pkg/services/control/server/doctor.go deleted file mode 100644 index 80041de44..000000000 --- a/pkg/services/control/server/doctor.go +++ /dev/null @@ -1,38 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - if !req.GetBody().GetRemoveDuplicates() { - return nil, status.Error(codes.InvalidArgument, "operation not specified") - } - - var prm engine.RemoveDuplicatesPrm - prm.Concurrency = int(req.GetBody().GetConcurrency()) - - err = s.s.RemoveDuplicates(ctx, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}} - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go deleted file mode 100644 index f3ba9015e..000000000 --- 
a/pkg/services/control/server/evacuate_async.go +++ /dev/null @@ -1,280 +0,0 @@ -package control - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") - -func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - if req.GetBody().GetScope() == uint32(control.StartShardEvacuationRequest_Body_NONE) { - return nil, status.Error(codes.InvalidArgument, "no evacuation scope") - } - - prm := engine.EvacuateShardPrm{ - ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - ObjectsHandler: s.replicateObject, - TreeHandler: s.replicateTree, - Scope: engine.EvacuateScope(req.GetBody().GetScope()), - ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), - ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), - RepOneOnly: req.GetBody().GetRepOneOnly(), - } - - if err = s.s.Evacuate(ctx, prm); err != nil { - var logicalErr logicerr.Logical - if errors.As(err, &logicalErr) { - return nil, status.Error(codes.Aborted, err.Error()) - } - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.StartShardEvacuationResponse{ - Body: &control.StartShardEvacuationResponse_Body{}, - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) GetShardEvacuationStatus(ctx context.Context, req *control.GetShardEvacuationStatusRequest) (*control.GetShardEvacuationStatusResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - state, err := s.s.GetEvacuationState(ctx) - if err != nil { - var logicalErr logicerr.Logical - if errors.As(err, &logicalErr) { - return nil, status.Error(codes.Aborted, err.Error()) - } - return nil, status.Error(codes.Internal, err.Error()) - } - - resp, err := stateToResponse(state) - if err != nil { - return nil, err - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShardEvacuationRequest) (*control.StopShardEvacuationResponse, error) { - err 
:= s.isValidRequest(req)
-	if err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	err = s.s.EnqueRunningEvacuationStop(ctx)
-	if err != nil {
-		var logicalErr logicerr.Logical
-		if errors.As(err, &logicalErr) {
-			return nil, status.Error(codes.Aborted, err.Error())
-		}
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	resp := &control.StopShardEvacuationResponse{
-		Body: &control.StopShardEvacuationResponse_Body{},
-	}
-
-	err = ctrlmessage.Sign(s.key, resp)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	s.s.ResetEvacuationStatusForShards()
-
-	return resp, nil
-}
-
-func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.ResetShardEvacuationStatusRequest) (*control.ResetShardEvacuationStatusResponse, error) {
-	err := s.isValidRequest(req)
-	if err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	err = s.s.ResetEvacuationStatus(ctx)
-	if err != nil {
-		var logicalErr logicerr.Logical
-		if errors.As(err, &logicalErr) {
-			return nil, status.Error(codes.Aborted, err.Error())
-		}
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	resp := &control.ResetShardEvacuationStatusResponse{
-		Body: &control.ResetShardEvacuationStatusResponse_Body{},
-	}
-
-	err = ctrlmessage.Sign(s.key, resp)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-	return resp, nil
-}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
-	cid, ok := obj.ContainerID()
-	if !ok {
-		// Return nil to prevent situations where a shard can't be evacuated
-		// because of a single bad/corrupted object.
-		return false, nil
-	}
-
-	nodes, err := s.getContainerNodes(ctx, cid)
-	if err != nil {
-		return false, err
-	}
-
-	if len(nodes) == 0 {
-		return false, nil
-	}
-
-	var res replicatorResult
-	task := replicator.Task{
-		NumCopies: 1,
-		Addr:      addr,
-		Obj:       obj,
-		Nodes:     nodes,
-	}
-	s.replicator.HandleReplicationTask(ctx, task, &res)
-
-	if res.count == 0 {
-		return false, errors.New("object was not replicated")
-	}
-	return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
-	nodes, err := s.getContainerNodes(ctx, contID)
-	if err != nil {
-		return false, "", err
-	}
-	if len(nodes) == 0 {
-		return false, "", nil
-	}
-
-	for _, node := range nodes {
-		err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
-		if err == nil {
-			return true, hex.EncodeToString(node.PublicKey()), nil
-		}
-	}
-	return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
-	rawCID := make([]byte, sha256.Size)
-	contID.Encode(rawCID)
-
-	var height uint64
-	for {
-		op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
-		if err != nil {
-			return err
-		}
-
-		if op.Time == 0 {
-			return nil
-		}
-
-		req := &tree.ApplyRequest{
-			Body: &tree.ApplyRequest_Body{
-				ContainerId: rawCID,
-				TreeId:      treeID,
-				Operation: &tree.LogMove{
-					ParentId: op.Parent,
-					Meta:     op.Bytes(),
-					ChildId:  op.Child,
-				},
-			},
-		}
-
-		err = tree.SignMessage(req, s.key)
-		if err != nil {
-			return fmt.Errorf("can't sign apply request: %w", err)
-		}
-
-		err = s.treeService.ReplicateTreeOp(ctx, node, req)
-		if err != nil {
-			return err
-		}
-
-		height = op.Time + 1
-	}
-}
-
-func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) 
([]netmap.NodeInfo, error) {
-	nm, err := s.netMapSrc.GetNetMap(ctx, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	c, err := s.cnrSrc.Get(ctx, contID)
-	if err != nil {
-		return nil, err
-	}
-
-	binCnr := make([]byte, sha256.Size)
-	contID.Encode(binCnr)
-
-	ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
-	if err != nil {
-		return nil, errFailedToBuildListOfContainerNodes
-	}
-
-	nodes := placement.FlattenNodes(ns)
-	bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
-	for i := 0; i < len(nodes); i++ { // don't use range, the slice is mutated in the loop body
-		if bytes.Equal(nodes[i].PublicKey(), bs) {
-			copy(nodes[i:], nodes[i+1:])
-			nodes = nodes[:len(nodes)-1]
-		}
-	}
-	return nodes, nil
-}
-
-type replicatorResult struct {
-	count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
-	r.count++
-}
diff --git a/pkg/services/control/server/flush_cache.go b/pkg/services/control/server/flush_cache.go
deleted file mode 100644
index 031002d71..000000000
--- a/pkg/services/control/server/flush_cache.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package control
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) {
-	err := s.isValidRequest(req)
-	if err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
-		var prm engine.FlushWriteCachePrm
-		prm.SetShardID(shardID)
-		prm.SetSeal(req.GetBody().GetSeal())
-
-		_, err = s.s.FlushWriteCache(ctx, prm)
-		if err != nil {
-			return nil, status.Error(codes.Internal, err.Error())
-		}
-	}
-
-	resp := &control.FlushCacheResponse{Body: &control.FlushCacheResponse_Body{}}
-
-	err = ctrlmessage.Sign(s.key, resp)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-	return resp, nil
-}
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
deleted file mode 100644
index a8ef7809e..000000000
--- a/pkg/services/control/server/gc.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package control
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
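In practice, each entry of the request's address list carries the string form of an object address; the handler below decodes it with oid.Address.DecodeString. A sketch of producing such an entry (assuming oid.Address.EncodeToString yields the "<CID>/<OID>" string form; the setters are the same ones used elsewhere in this diff):

	var addr oid.Address
	addr.SetContainer(cnr)
	addr.SetObject(obj)
	entry := []byte(addr.EncodeToString())

-// DropObjects marks objects to be removed from the local node.
-//
-// Objects are marked via garbage collector's callback.
-//
-// If some address is not a valid object address in a binary format, an error is returned.
-// If the request is unsigned or signed by a disallowed key, a permission error is returned.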
-func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
-	// verify request
-	if err := s.isValidRequest(req); err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	binAddrList := req.GetBody().GetAddressList()
-	addrList := make([]oid.Address, len(binAddrList))
-
-	for i := range binAddrList {
-		err := addrList[i].DecodeString(string(binAddrList[i]))
-		if err != nil {
-			return nil, status.Error(codes.InvalidArgument,
-				fmt.Sprintf("invalid binary object address: %v", err),
-			)
-		}
-	}
-
-	var firstErr error
-	for i := range addrList {
-		var prm engine.DeletePrm
-		prm.WithForceRemoval()
-		prm.WithAddress(addrList[i])
-
-		if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
-			firstErr = err
-		}
-	}
-
-	if firstErr != nil {
-		return nil, status.Error(codes.Internal, firstErr.Error())
-	}
-
-	// create and fill response
-	resp := new(control.DropObjectsResponse)
-
-	body := new(control.DropObjectsResponse_Body)
-	resp.SetBody(body)
-
-	// sign the response
-	if err := ctrlmessage.Sign(s.key, resp); err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	return resp, nil
-}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
deleted file mode 100644
index 5e0496910..000000000
--- a/pkg/services/control/server/get_netmap_status.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package control
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
-	if err := s.isValidRequest(req); err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	resp := &control.GetNetmapStatusResponse{
-		Body: &control.GetNetmapStatusResponse_Body{
-			Status: st,
-			Epoch:  epoch,
-		},
-	}
-
-	if err := ctrlmessage.Sign(s.key, resp); err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	return resp, nil
-}
diff --git a/pkg/services/control/server/healthcheck.go b/pkg/services/control/server/healthcheck.go
deleted file mode 100644
index 121c51280..000000000
--- a/pkg/services/control/server/healthcheck.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package control
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
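Both status fields in the response below are taken verbatim from the configured HealthChecker component (see the server options further on in this diff); beyond request verification and response signing, the handler adds no logic of its own.

-// HealthCheck returns health status of the local node.
-//
-// If the request is unsigned or signed by a disallowed key, a permission error is returned.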
-func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) { - // verify request - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - // create and fill response - resp := new(control.HealthCheckResponse) - - body := new(control.HealthCheckResponse_Body) - resp.SetBody(body) - - body.SetNetmapStatus(s.healthChecker.NetmapStatus()) - body.SetHealthStatus(s.healthChecker.HealthStatus()) - - // sign the response - if err := ctrlmessage.Sign(s.key, resp); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/pkg/services/control/server/helpers.go b/pkg/services/control/server/helpers.go deleted file mode 100644 index 25e54cd98..000000000 --- a/pkg/services/control/server/helpers.go +++ /dev/null @@ -1,20 +0,0 @@ -package control - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - -func (s *Server) getShardIDList(raw [][]byte) []*shard.ID { - if len(raw) != 0 { - res := make([]*shard.ID, 0, len(raw)) - for i := range raw { - res = append(res, shard.NewIDFromBytes(raw[i])) - } - return res - } - - info := s.s.DumpInfo() - res := make([]*shard.ID, 0, len(info.Shards)) - for i := range info.Shards { - res = append(res, info.Shards[i].ID) - } - return res -} diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go deleted file mode 100644 index efe2754ea..000000000 --- a/pkg/services/control/server/list_shards.go +++ /dev/null @@ -1,80 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (*control.ListShardsResponse, error) { - // verify request - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - // create and fill response - resp := new(control.ListShardsResponse) - - body := new(control.ListShardsResponse_Body) - resp.SetBody(body) - - info := s.s.DumpInfo() - - shardInfos := make([]control.ShardInfo, 0, len(info.Shards)) - - for _, sh := range info.Shards { - si := new(control.ShardInfo) - - si.SetShard_ID(*sh.ID) - si.SetMetabasePath(sh.MetaBaseInfo.Path) - si.Blobstor = blobstorInfoToProto(sh.BlobStorInfo) - si.SetWritecachePath(sh.WriteCacheInfo.Path) - si.SetPiloramaPath(sh.PiloramaInfo.Path) - - var m control.ShardMode - - switch sh.Mode { - case mode.ReadWrite: - m = control.ShardMode_READ_WRITE - case mode.ReadOnly: - m = control.ShardMode_READ_ONLY - case mode.Degraded: - m = control.ShardMode_DEGRADED - case mode.DegradedReadOnly: - m = control.ShardMode_DEGRADED_READ_ONLY - default: - m = control.ShardMode_SHARD_MODE_UNDEFINED - } - - si.SetMode(m) - si.SetErrorCount(sh.ErrorCount) - si.SetEvacuationInProgress(sh.EvacuationInProgress) - - shardInfos = append(shardInfos, *si) - } - - body.SetShards(shardInfos) - - // sign the response - if err := ctrlmessage.Sign(s.key, resp); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func 
blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo { - res := make([]control.BlobstorInfo, len(info.SubStorages)) - for i := range info.SubStorages { - res[i] = control.BlobstorInfo{ - Path: info.SubStorages[i].Path, - Type: info.SubStorages[i].Type, - } - } - return res -} diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go deleted file mode 100644 index 39565ed50..000000000 --- a/pkg/services/control/server/list_shards_for_object.go +++ /dev/null @@ -1,65 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - var obj oid.ID - err = obj.DecodeString(req.GetBody().GetObjectId()) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - var cnr cid.ID - err = cnr.DecodeString(req.GetBody().GetContainerId()) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - resp := new(control.ListShardsForObjectResponse) - body := new(control.ListShardsForObjectResponse_Body) - resp.SetBody(body) - - var objAddr oid.Address - objAddr.SetContainer(cnr) - objAddr.SetObject(obj) - info, err := s.s.ListShardsForObject(ctx, objAddr) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - if len(info) == 0 { - return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject) - } - - body.SetShard_ID(shardInfoToProto(info)) - - // Sign the response - if err := ctrlmessage.Sign(s.key, resp); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func shardInfoToProto(infos []shard.Info) [][]byte { - shardInfos := make([][]byte, 0, len(infos)) - for _, info := range infos { - shardInfos = append(shardInfos, *info.ID) - } - - return shardInfos -} diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go deleted file mode 100644 index ab8258e27..000000000 --- a/pkg/services/control/server/policy_engine.go +++ /dev/null @@ -1,250 +0,0 @@ -package control - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func apeTarget(chainTarget *control.ChainTarget) (engine.Target, error) { - switch chainTarget.GetType() { - case control.ChainTarget_CONTAINER: - return engine.ContainerTarget(chainTarget.GetName()), nil - case control.ChainTarget_NAMESPACE: - return 
engine.NamespaceTarget(chainTarget.GetName()), nil
-	case control.ChainTarget_USER:
-		return engine.UserTarget(chainTarget.GetName()), nil
-	case control.ChainTarget_GROUP:
-		return engine.GroupTarget(chainTarget.GetName()), nil
-	default:
-	}
-	return engine.Target{}, status.Error(codes.InvalidArgument,
-		fmt.Errorf("target type is not supported: %s", chainTarget.GetType().String()).Error())
-}
-
-func controlTarget(chainTarget *engine.Target) (control.ChainTarget, error) {
-	switch chainTarget.Type {
-	case engine.Container:
-		return control.ChainTarget{
-			Name: chainTarget.Name,
-			Type: control.ChainTarget_CONTAINER,
-		}, nil
-	case engine.Namespace:
-		// An empty name denotes the root namespace, so convert the "root" alias to it.
-		nm := chainTarget.Name
-		if nm == "root" {
-			nm = ""
-		}
-		return control.ChainTarget{
-			Name: nm,
-			Type: control.ChainTarget_NAMESPACE,
-		}, nil
-	case engine.User:
-		return control.ChainTarget{
-			Name: chainTarget.Name,
-			Type: control.ChainTarget_USER,
-		}, nil
-	case engine.Group:
-		return control.ChainTarget{
-			Name: chainTarget.Name,
-			Type: control.ChainTarget_GROUP,
-		}, nil
-	default:
-	}
-	return control.ChainTarget{}, status.Error(codes.InvalidArgument,
-		fmt.Errorf("target type is not supported: %v", chainTarget.Type).Error())
-}
-
-func (s *Server) AddChainLocalOverride(_ context.Context, req *control.AddChainLocalOverrideRequest) (*control.AddChainLocalOverrideResponse, error) {
-	if err := s.isValidRequest(req); err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	var chain apechain.Chain
-	if err := chain.DecodeBytes(req.GetBody().GetChain()); err != nil {
-		return nil, status.Error(codes.InvalidArgument, err.Error())
-	}
-	for _, rule := range chain.Rules {
-		for _, name := range rule.Resources.Names {
-			if err := ape.ValidateResourceName(name); err != nil {
-				return nil, status.Error(codes.InvalidArgument, fmt.Errorf("invalid resource: %w", err).Error())
-			}
-		}
-	}
-
-	s.apeChainCounter.Add(1)
-	// TODO (aarifullin): such a chain id is not well-designed yet.
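// (With this scheme the first auto-assigned ID would render as something
// like "ingress:1", assuming apechain.Ingress stringifies as "ingress".)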
- if len(chain.ID) == 0 { - chain.ID = apechain.ID(fmt.Sprintf("%s:%d", apechain.Ingress, s.apeChainCounter.Load())) - } - - target, err := apeTarget(req.GetBody().GetTarget()) - if err != nil { - return nil, err - } - - if _, err = s.localOverrideStorage.LocalStorage().AddOverride(apechain.Ingress, target, &chain); err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - - resp := &control.AddChainLocalOverrideResponse{ - Body: &control.AddChainLocalOverrideResponse_Body{ - ChainId: []byte(chain.ID), - }, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) GetChainLocalOverride(_ context.Context, req *control.GetChainLocalOverrideRequest) (*control.GetChainLocalOverrideResponse, error) { - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - target, err := apeTarget(req.GetBody().GetTarget()) - if err != nil { - return nil, err - } - chain, err := s.localOverrideStorage.LocalStorage().GetOverride(apechain.Ingress, target, apechain.ID(req.GetBody().GetChainId())) - if err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - - resp := &control.GetChainLocalOverrideResponse{ - Body: &control.GetChainLocalOverrideResponse_Body{ - Chain: chain.Bytes(), - }, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) ListChainLocalOverrides(_ context.Context, req *control.ListChainLocalOverridesRequest) (*control.ListChainLocalOverridesResponse, error) { - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - target, err := apeTarget(req.GetBody().GetTarget()) - if err != nil { - return nil, err - } - - chains, err := s.localOverrideStorage.LocalStorage().ListOverrides(apechain.Ingress, target) - if err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - serializedChains := make([][]byte, 0, len(chains)) - for _, chain := range chains { - serializedChains = append(serializedChains, chain.Bytes()) - } - - resp := &control.ListChainLocalOverridesResponse{ - Body: &control.ListChainLocalOverridesResponse_Body{ - Chains: serializedChains, - }, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) RemoveChainLocalOverride(_ context.Context, req *control.RemoveChainLocalOverrideRequest) (*control.RemoveChainLocalOverrideResponse, error) { - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - target, err := apeTarget(req.GetBody().GetTarget()) - if err != nil { - return nil, err - } - - if err = s.localOverrideStorage.LocalStorage().RemoveOverride(apechain.Ingress, target, req.GetBody().GetChainId()); err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - resp := &control.RemoveChainLocalOverrideResponse{ - Body: &control.RemoveChainLocalOverrideResponse_Body{}, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) RemoveChainLocalOverridesByTarget(_ context.Context, req *control.RemoveChainLocalOverridesByTargetRequest) 
(*control.RemoveChainLocalOverridesByTargetResponse, error) { - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - target, err := apeTarget(req.GetBody().GetTarget()) - if err != nil { - return nil, err - } - - if err = s.localOverrideStorage.LocalStorage().RemoveOverridesByTarget(apechain.Ingress, target); err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - resp := &control.RemoveChainLocalOverridesByTargetResponse{ - Body: &control.RemoveChainLocalOverridesByTargetResponse_Body{}, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListTargetsLocalOverridesRequest) (*control.ListTargetsLocalOverridesResponse, error) { - if err := s.isValidRequest(req); err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - apeChainName := apechain.Name(req.GetBody().GetChainName()) - apeTargets, err := s.localOverrideStorage.LocalStorage().ListOverrideDefinedTargets(apeChainName) - if err != nil { - return nil, status.Error(getCodeByLocalStorageErr(err), err.Error()) - } - targets := make([]control.ChainTarget, 0, len(apeTargets)) - for i := range apeTargets { - target, err := controlTarget(&apeTargets[i]) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - targets = append(targets, target) - } - - resp := &control.ListTargetsLocalOverridesResponse{ - Body: &control.ListTargetsLocalOverridesResponse_Body{ - Targets: targets, - }, - } - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func getCodeByLocalStorageErr(err error) codes.Code { - if errors.Is(err, engine.ErrChainNotFound) || errors.Is(err, engine.ErrChainNameNotFound) || - errors.Is(err, engine.ErrResourceNotFound) { - return codes.NotFound - } - return codes.Internal -} diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go deleted file mode 100644 index 6ddfb8bf4..000000000 --- a/pkg/services/control/server/rebuild.go +++ /dev/null @@ -1,59 +0,0 @@ -package control - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - if req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit())) - } - - if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent())) - } - - prm := engine.RebuildPrm{ - ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()), - ConcurrencyLimit: 
req.GetBody().GetConcurrencyLimit(), - TargetFillPercent: req.GetBody().GetTargetFillPercent(), - } - - res, err := s.s.Rebuild(ctx, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}} - for _, r := range res.ShardResults { - if r.Success { - resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{ - Shard_ID: *r.ShardID, - Success: true, - }) - } else { - resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{ - Shard_ID: *r.ShardID, - Error: r.ErrorMsg, - }) - } - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go deleted file mode 100644 index 6799bdcac..000000000 --- a/pkg/services/control/server/seal_writecache.go +++ /dev/null @@ -1,52 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCacheRequest) (*control.SealWriteCacheResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - prm := engine.SealWriteCachePrm{ - ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - Async: req.GetBody().GetAsync(), - RestoreMode: req.GetBody().GetRestoreMode(), - Shrink: req.GetBody().GetShrink(), - } - - res, err := s.s.SealWriteCache(ctx, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}} - for _, r := range res.ShardResults { - if r.Success { - resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{ - Shard_ID: *r.ShardID, - Success: true, - }) - } else { - resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{ - Shard_ID: *r.ShardID, - Error: r.ErrorMsg, - }) - } - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go deleted file mode 100644 index 59d701bc6..000000000 --- a/pkg/services/control/server/server.go +++ /dev/null @@ -1,181 +0,0 @@ -package control - -import ( - "context" - "crypto/ecdsa" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" -) - -// Server is an entity that serves -// Control service on storage node. 
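// A typical construction wires the dependencies through the functional
// options declared below (a sketch, not the node's actual wiring; key,
// allowedKeys, hc and storageEngine are placeholders):
//
//	srv := New(
//		WithKey(key),
//		WithAuthorizedKeys(allowedKeys),
//		WithHealthChecker(hc),
//		WithLocalStorage(storageEngine),
//	)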
-type Server struct {
-	*cfg
-
-	// TODO (aarifullin): this counter is used to assign ids to rule chains
-	// added as local overrides and will be removed as soon as the in-memory
-	// implementation is replaced.
-	apeChainCounter atomic.Uint32
-}
-
-// HealthChecker is a component interface for calculating
-// the current health status of a node.
-type HealthChecker interface {
-	// NetmapStatus must calculate and return current status of the node in FrostFS network map.
-	//
-	// If status can not be calculated for any reason,
-	// control.NetmapStatus_STATUS_UNDEFINED should be returned.
-	NetmapStatus() control.NetmapStatus
-
-	// HealthStatus must calculate and return current health status of the node application.
-	//
-	// If status can not be calculated for any reason,
-	// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
-	HealthStatus() control.HealthStatus
-}
-
-// NodeState is an interface of storage node network state.
-type NodeState interface {
-	// SetNetmapStatus switches the storage node to the given network status.
-	//
-	// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
-	// in the network settings, the node additionally starts local maintenance.
-	SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
-
-	// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
-	// but starts local maintenance regardless of the network settings.
-	ForceMaintenance(ctx context.Context) error
-
-	GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
-}
-
-// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
-// interface methods.
-type LocalOverrideStorageDecorator interface {
-	// LocalStorage method can be decorated with sync primitives when the local
-	// override storage state must stay consistent for the chain router.
-	LocalStorage() policyengine.LocalOverrideStorage
-}
-
-// Option of the Server's constructor.
-type Option func(*cfg)
-
-type cfg struct {
-	key *ecdsa.PrivateKey
-
-	allowedKeys [][]byte
-
-	healthChecker HealthChecker
-
-	netMapSrc netmap.Source
-
-	cnrSrc container.Source
-
-	localOverrideStorage LocalOverrideStorageDecorator
-
-	replicator *replicator.Replicator
-
-	nodeState NodeState
-
-	treeService TreeService
-
-	s *engine.StorageEngine
-}
-
-func defaultCfg() *cfg {
-	return &cfg{}
-}
-
-// New creates, initializes and returns new Server instance.
-func New(opts ...Option) *Server {
-	c := defaultCfg()
-
-	for _, opt := range opts {
-		opt(c)
-	}
-
-	return &Server{
-		cfg: c,
-	}
-}
-
-// WithKey returns option to set private key
-// used for signing responses.
-func WithKey(key *ecdsa.PrivateKey) Option {
-	return func(c *cfg) {
-		c.key = key
-	}
-}
-
-// WithAuthorizedKeys returns option to add list of public
-// keys that have rights to use Control service.
-func WithAuthorizedKeys(keys [][]byte) Option {
-	return func(c *cfg) {
-		c.allowedKeys = append(c.allowedKeys, keys...)
-	}
-}
-
-// WithHealthChecker returns option to set component
-// to calculate node health status.
-func WithHealthChecker(hc HealthChecker) Option {
-	return func(c *cfg) {
-		c.healthChecker = hc
-	}
-}
-
-// WithNetMapSource returns option to set network map storage.
-func WithNetMapSource(netMapSrc netmap.Source) Option {
-	return func(c *cfg) {
-		c.netMapSrc = netMapSrc
-	}
-}
-
-// WithContainerSource returns option to set container storage.
-func WithContainerSource(cnrSrc container.Source) Option {
-	return func(c *cfg) {
-		c.cnrSrc = cnrSrc
-	}
-}
-
-// WithReplicator returns option to set the object replicator.
-func WithReplicator(r *replicator.Replicator) Option {
-	return func(c *cfg) {
-		c.replicator = r
-	}
-}
-
-// WithNodeState returns option to set node network state component.
-func WithNodeState(state NodeState) Option {
-	return func(c *cfg) {
-		c.nodeState = state
-	}
-}
-
-// WithLocalStorage returns option to set local storage engine that
-// contains information about shards.
-func WithLocalStorage(engine *engine.StorageEngine) Option {
-	return func(c *cfg) {
-		c.s = engine
-	}
-}
-
-// WithTreeService returns an option to set tree service.
-func WithTreeService(s TreeService) Option {
-	return func(c *cfg) {
-		c.treeService = s
-	}
-}
-
-// WithLocalOverrideStorage returns the option to set access policy engine
-// chain override storage.
-func WithLocalOverrideStorage(localOverrideStorage LocalOverrideStorageDecorator) Option {
-	return func(c *cfg) {
-		c.localOverrideStorage = localOverrideStorage
-	}
-}
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
deleted file mode 100644
index 529041dca..000000000
--- a/pkg/services/control/server/set_netmap_status.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package control
-
-import (
-	"context"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// SetNetmapStatus sets node status in FrostFS network.
-//
-// If the request is unsigned or signed by a disallowed key, a permission error is returned.
-func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
-	// verify request
-	if err := s.isValidRequest(req); err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	var err error
-	bodyReq := req.GetBody()
-	st := bodyReq.GetStatus()
-	force := bodyReq.GetForceMaintenance()
-
-	if force {
-		if st != control.NetmapStatus_MAINTENANCE {
-			return nil, status.Errorf(codes.InvalidArgument,
-				"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
-		}
-
-		err = s.nodeState.ForceMaintenance(ctx)
-	} else {
-		err = s.nodeState.SetNetmapStatus(ctx, st)
-	}
-
-	if err != nil {
-		return nil, status.Error(codes.Aborted, err.Error())
-	}
-
-	// create and fill response
-	resp := new(control.SetNetmapStatusResponse)
-
-	body := new(control.SetNetmapStatusResponse_Body)
-	resp.SetBody(body)
-
-	// sign the response
-	if err := ctrlmessage.Sign(s.key, resp); err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	return resp, nil
-}
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
deleted file mode 100644
index 4f8796263..000000000
--- a/pkg/services/control/server/set_shard_mode.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package control
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) 
(*control.SetShardModeResponse, error) { - // verify request - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - var ( - m mode.Mode - - requestedMode = req.GetBody().GetMode() - ) - - switch requestedMode { - case control.ShardMode_READ_WRITE: - m = mode.ReadWrite - case control.ShardMode_READ_ONLY: - m = mode.ReadOnly - case control.ShardMode_DEGRADED: - m = mode.Degraded - case control.ShardMode_DEGRADED_READ_ONLY: - m = mode.DegradedReadOnly - default: - return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode)) - } - - for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) { - err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter()) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - } - - // create and fill response - resp := new(control.SetShardModeResponse) - - body := new(control.SetShardModeResponse_Body) - resp.SetBody(body) - - // sign the response - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go deleted file mode 100644 index 0e8e24b6e..000000000 --- a/pkg/services/control/server/sign.go +++ /dev/null @@ -1,61 +0,0 @@ -package control - -import ( - "bytes" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" -) - -var errDisallowedKey = errors.New("key is not in the allowed list") - -func (s *Server) isValidRequest(req ctrlmessage.SignedMessage) error { - sign := req.GetSignature() - if sign == nil { - // TODO(@cthulhu-rider): #468 use "const" error - return errors.New("missing signature") - } - - var ( - key = sign.GetKey() - allowed = false - ) - - // check if key is allowed - for i := range s.allowedKeys { - if allowed = bytes.Equal(s.allowedKeys[i], key); allowed { - break - } - } - - if !allowed { - return errDisallowedKey - } - - // verify signature - binBody, err := req.ReadSignedData(nil) - if err != nil { - return fmt.Errorf("marshal request body: %w", err) - } - - // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(sign.GetKey()) - sigV2.SetSign(sign.GetSign()) - sigV2.SetScheme(refs.ECDSA_SHA512) - - var sig frostfscrypto.Signature - if err := sig.ReadFromV2(sigV2); err != nil { - return fmt.Errorf("can't read signature: %w", err) - } - - if !sig.Verify(binBody) { - // TODO(@cthulhu-rider): #468 use "const" error - return errors.New("invalid signature") - } - - return nil -} diff --git a/pkg/services/control/server/syncronize_tree.go b/pkg/services/control/server/syncronize_tree.go deleted file mode 100644 index b2a966b2c..000000000 --- a/pkg/services/control/server/syncronize_tree.go +++ /dev/null @@ -1,52 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" 
-)
-
-// TreeService represents a tree service instance.
-type TreeService interface {
- SynchronizeTree(ctx context.Context, cnr cid.ID, treeID string) error
- ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *tree.ApplyRequest) error
-}
-
-func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTreeRequest) (*control.SynchronizeTreeResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- if s.treeService == nil {
- return nil, status.Error(codes.Internal, "tree service is disabled")
- }
-
- b := req.GetBody()
-
- var cnr cid.ID
- if err := cnr.Decode(b.GetContainerId()); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- err = s.treeService.SynchronizeTree(ctx, cnr, b.GetTreeId())
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := new(control.SynchronizeTreeResponse)
- resp.SetBody(new(control.SynchronizeTreeResponse_Body))
-
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
deleted file mode 100644
index 4c539acfc..000000000
--- a/pkg/services/control/service.proto
+++ /dev/null
@@ -1,754 +0,0 @@
-syntax = "proto3";
-
-package control;
-
-import "pkg/services/control/types.proto";
-
-option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control";
-
-// `ControlService` provides an interface for internal work with the storage
-// node.
-service ControlService {
- // Performs health check of the storage node.
- rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
-
- // Sets status of the storage node in FrostFS network map.
- rpc SetNetmapStatus(SetNetmapStatusRequest) returns (SetNetmapStatusResponse);
-
- // Gets status of the storage node in FrostFS network map.
- rpc GetNetmapStatus(GetNetmapStatusRequest) returns (GetNetmapStatusResponse);
-
- // Marks objects to be removed from the node's local object storage.
- rpc DropObjects(DropObjectsRequest) returns (DropObjectsResponse);
-
- // Returns a list that contains information about all shards of a node.
- rpc ListShards(ListShardsRequest) returns (ListShardsResponse);
-
- // Sets mode of the shard.
- rpc SetShardMode(SetShardModeRequest) returns (SetShardModeResponse);
-
- // Synchronizes all log operations for the specified tree.
- rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
-
- // StartShardEvacuation starts moving all data from one shard to the others.
- rpc StartShardEvacuation(StartShardEvacuationRequest)
- returns (StartShardEvacuationResponse);
-
- // GetShardEvacuationStatus returns evacuation status.
- rpc GetShardEvacuationStatus(GetShardEvacuationStatusRequest)
- returns (GetShardEvacuationStatusResponse);
-
- // ResetShardEvacuationStatus resets evacuation status if there is no running
- // evacuation process.
- rpc ResetShardEvacuationStatus(ResetShardEvacuationStatusRequest)
- returns (ResetShardEvacuationStatusResponse);
-
- // StopShardEvacuation stops moving all data from one shard to the others.
- rpc StopShardEvacuation(StopShardEvacuationRequest)
- returns (StopShardEvacuationResponse);
-
- // FlushCache flushes all write-cache data to the main storage.
- rpc FlushCache(FlushCacheRequest) returns (FlushCacheResponse);
-
- // Doctor performs storage restructuring operations on the engine.
- rpc Doctor(DoctorRequest) returns (DoctorResponse);
-
- // Add local access policy engine overrides to a node.
- rpc AddChainLocalOverride(AddChainLocalOverrideRequest)
- returns (AddChainLocalOverrideResponse);
-
- // Get local access policy engine overrides stored in the node by chain id.
- rpc GetChainLocalOverride(GetChainLocalOverrideRequest)
- returns (GetChainLocalOverrideResponse);
-
- // List local access policy engine overrides stored in the node by container
- // id.
- rpc ListChainLocalOverrides(ListChainLocalOverridesRequest)
- returns (ListChainLocalOverridesResponse);
-
- // Remove local access policy engine overrides stored in the node by chain
- // id.
- rpc RemoveChainLocalOverride(RemoveChainLocalOverrideRequest)
- returns (RemoveChainLocalOverrideResponse);
-
- // Remove all local access policy engine overrides stored in the node for
- // the given target.
- rpc RemoveChainLocalOverridesByTarget(
- RemoveChainLocalOverridesByTargetRequest)
- returns (RemoveChainLocalOverridesByTargetResponse);
-
- // List targets of the local APE overrides stored in the node.
- rpc ListTargetsLocalOverrides(ListTargetsLocalOverridesRequest)
- returns (ListTargetsLocalOverridesResponse);
-
- // Flush objects from the write-cache and move it to degraded read-only mode.
- rpc SealWriteCache(SealWriteCacheRequest) returns (SealWriteCacheResponse);
-
- // DetachShards detaches and closes shards.
- rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse);
-
- // StartShardRebuild starts the shard rebuild process.
- rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
-
- // ListShardsForObject returns information about the shards where the object
- // is stored.
- rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
-}
-
-// Health check request.
-message HealthCheckRequest {
- // Health check request body.
- message Body {}
-
- // Body of health check request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Health check response.
-message HealthCheckResponse {
- // Health check response body.
- message Body {
- // Status of the storage node in FrostFS network map.
- NetmapStatus netmap_status = 1;
-
- // Health status of storage node application.
- HealthStatus health_status = 2;
- }
-
- // Body of health check response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Set netmap status request.
-message SetNetmapStatusRequest {
- // Set netmap status request body.
- message Body {
- // New storage node status in FrostFS network map.
- // If status is MAINTENANCE, the node checks whether maintenance is
- // allowed in the network settings. In case of prohibition, the request
- // is denied. Otherwise, the node switches to local maintenance state. To
- // force local maintenance, use `force_maintenance` flag.
- NetmapStatus status = 1;
-
- // MAINTENANCE status validation skip flag. If set, node starts local
- // maintenance regardless of network settings. The flag MUST NOT be
- // set for any other status.
- bool force_maintenance = 2;
- }
-
- // Body of set netmap status request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Set netmap status response.
-message SetNetmapStatusResponse {
- // Set netmap status response body.
- message Body {}
-
- // Body of set netmap status response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Get netmap status request.
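The SetNetmapStatus messages above pair with the server-side check shown earlier in set_netmap_status.go: force_maintenance is accepted only together with MAINTENANCE. A hedged client-side sketch using the generated Go types from later in this diff (signing the request with ctrlmessage.Sign is an assumption; the deleted code only shows it applied to responses):

// buildForcedMaintenanceRequest is illustrative, not part of the deleted sources.
func buildForcedMaintenanceRequest(key *ecdsa.PrivateKey) (*control.SetNetmapStatusRequest, error) {
	body := new(control.SetNetmapStatusRequest_Body)
	body.SetStatus(control.NetmapStatus_MAINTENANCE) // the only status for which the force flag is legal
	body.SetForceMaintenance(true)                   // skip the network-settings maintenance check on the node

	req := new(control.SetNetmapStatusRequest)
	req.SetBody(body)

	if err := ctrlmessage.Sign(key, req); err != nil { // assumption: Sign accepts requests as well as responses
		return nil, err
	}
	return req, nil
}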
-message GetNetmapStatusRequest {
- message Body {}
-
- // Body of get netmap status request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Get netmap status response.
-message GetNetmapStatusResponse {
- message Body {
- // Storage node status in FrostFS network map.
- NetmapStatus status = 1;
-
- // Network map epoch.
- uint64 epoch = 2;
- }
-
- // Body of get netmap status response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Request to drop the objects.
-message DropObjectsRequest {
- // Request body structure.
- message Body {
- // List of object addresses to be removed,
- // in FrostFS API binary format.
- repeated bytes address_list = 1;
- }
-
- // Body of the request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Response to request to drop the objects.
-message DropObjectsResponse {
- // Response body structure.
- message Body {}
-
- // Body of the response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Request to list all shards of the node.
-message ListShardsRequest {
- // Request body structure.
- message Body {}
-
- // Body of the request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// ListShards response.
-message ListShardsResponse {
- // Response body structure.
- message Body {
- // List of the node's shards.
- repeated ShardInfo shards = 1;
- }
-
- // Body of the response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Request to set mode of the shard.
-message SetShardModeRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
-
- // Mode that is requested to be set.
- ShardMode mode = 2;
-
- // Flag signifying whether the error counter should be set to 0.
- bool resetErrorCounter = 3;
- }
-
- // Body of set shard mode request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// SetShardMode response.
-message SetShardModeResponse {
- // Response body structure.
- message Body {}
-
- // Body of set shard mode response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// SynchronizeTree request.
-message SynchronizeTreeRequest {
- // Request body structure.
- message Body {
- bytes container_id = 1;
- string tree_id = 2;
- // Starting height for the synchronization. Can be omitted.
- uint64 height = 3;
- }
-
- // Body of synchronize tree request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// SynchronizeTree response.
-message SynchronizeTreeResponse {
- // Response body structure.
- message Body {}
-
- // Body of synchronize tree response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// EvacuateShard request.
-message EvacuateShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 2;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-// EvacuateShard response.
-message EvacuateShardResponse {
- // Response body structure.
- message Body { uint32 count = 1; }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-// FlushCache request.
-message FlushCacheRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1; - // If true, then writecache will be left in read-only mode after flush - // completed. - bool seal = 2; - } - - Body body = 1; - Signature signature = 2; -} - -// FlushCache response. -message FlushCacheResponse { - // Response body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// Doctor request. -message DoctorRequest { - // Request body structure. - message Body { - // Number of threads to use for the operation. - uint32 concurrency = 1; - // Flag to search engine for duplicate objects and leave only one copy. - bool remove_duplicates = 2; - } - - Body body = 1; - Signature signature = 2; -} - -// Doctor response. -message DoctorResponse { - // Response body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// StartShardEvacuation request. -message StartShardEvacuationRequest { - // Request body structure. - message Body { - enum Scope { - NONE = 0; - OBJECTS = 1; - TREES = 2; - } - - // IDs of the shards. - repeated bytes shard_ID = 1; - // Flag indicating whether object read errors should be ignored. - bool ignore_errors = 2; - // Evacuation scope. - uint32 scope = 3; - // Count of concurrent container evacuation workers. - uint32 container_worker_count = 4; - // Count of concurrent object evacuation workers. - uint32 object_worker_count = 5; - // Choose for evacuation objects in `REP 1` containers only. - bool rep_one_only = 6; - } - - Body body = 1; - Signature signature = 2; -} - -// StartShardEvacuation response. -message StartShardEvacuationResponse { - // Response body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// GetShardEvacuationStatus request. -message GetShardEvacuationStatusRequest { - // Request body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// GetShardEvacuationStatus response. -message GetShardEvacuationStatusResponse { - // Response body structure. - message Body { - // Evacuate status enum. - enum Status { - EVACUATE_SHARD_STATUS_UNDEFINED = 0; - RUNNING = 1; - COMPLETED = 2; - } - - // Unix timestamp value. - message UnixTimestamp { int64 value = 1; } - - // Duration in seconds. - message Duration { int64 seconds = 1; } - - // Total objects to evacuate count. The value is approximate, so evacuated + - // failed + skipped == total is not guaranteed after completion. - uint64 total_objects = 1; - // Evacuated objects count. - uint64 evacuated_objects = 2; - // Failed objects count. - uint64 failed_objects = 3; - - // Shard IDs. - repeated bytes shard_ID = 4; - // Evacuation process status. - Status status = 5; - // Evacuation process duration. - Duration duration = 6; - // Evacuation process started at timestamp. - UnixTimestamp started_at = 7; - // Error message if evacuation failed. - string error_message = 8; - - // Skipped objects count. - uint64 skipped_objects = 9; - - // Total trees to evacuate count. - uint64 total_trees = 10; - // Evacuated trees count. - uint64 evacuated_trees = 11; - // Failed trees count. - uint64 failed_trees = 12; - } - - Body body = 1; - Signature signature = 2; -} - -// ResetShardEvacuationStatus request. -message ResetShardEvacuationStatusRequest { - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// ResetShardEvacuationStatus response. -message ResetShardEvacuationStatusResponse { - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// StopShardEvacuation request. 
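The evacuation counters in GetShardEvacuationStatusResponse above come with an explicit caveat: evacuated + failed + skipped == total is not guaranteed, so any progress computation should clamp. A hedged Go sketch (the getter names follow the code-generation pattern visible later in this diff, but the generated type for this message is not shown here, so they are assumptions):

// evacuationProgressPercent is illustrative; the getters on
// *control.GetShardEvacuationStatusResponse_Body are assumed from the
// generation pattern, not shown in this diff.
func evacuationProgressPercent(b *control.GetShardEvacuationStatusResponse_Body) float64 {
	total := b.GetTotalObjects()
	if total == 0 {
		return 0
	}
	done := b.GetEvacuatedObjects() + b.GetFailedObjects() + b.GetSkippedObjects()
	if done > total { // the proto comment warns the sum may overshoot the total
		done = total
	}
	return 100 * float64(done) / float64(total)
}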
-message StopShardEvacuationRequest { - // Request body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// StopShardEvacuation response. -message StopShardEvacuationResponse { - // Response body structure. - message Body {} - - Body body = 1; - Signature signature = 2; -} - -// AddChainLocalOverride request. -message AddChainLocalOverrideRequest { - message Body { - // Target for which the overrides are applied. - ChainTarget target = 1; - - // Serialized rule chain. If chain ID is left empty - // in the chain, then it will be generated and returned - // in the response. - bytes chain = 2; - } - - Body body = 1; - - Signature signature = 2; -} - -// AddChainLocalOverride response. -message AddChainLocalOverrideResponse { - message Body { - // Chain ID assigned for the added rule chain. - // If chain ID is left empty in the request, then - // it will be generated. - bytes chain_id = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -// GetChainLocalOverride request. -message GetChainLocalOverrideRequest { - message Body { - // Target for which the overrides are applied. - ChainTarget target = 1; - - // Chain ID assigned for the added rule chain. - bytes chain_id = 2; - } - - Body body = 1; - - Signature signature = 2; -} - -// GetChainLocalOverride response. -message GetChainLocalOverrideResponse { - message Body { - // Serialized rule chain. - bytes chain = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -// ListChainLocalOverrides request. -message ListChainLocalOverridesRequest { - message Body { - // Target for which the overrides are applied. - ChainTarget target = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -// ListChainLocalOverrides response. -message ListChainLocalOverridesResponse { - message Body { - // The list of serialized rule chain. - repeated bytes chains = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -// ListTargetsLocalOverrides request. -message ListTargetsLocalOverridesRequest { - message Body { - // Target for which the overrides are applied. - string chainName = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -// ListTargetsLocalOverrides response. -message ListTargetsLocalOverridesResponse { - message Body { - // The list of chain targets. - repeated ChainTarget targets = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -message RemoveChainLocalOverrideRequest { - message Body { - // Target for which the overrides are applied. - ChainTarget target = 1; - - // Chain ID assigned for the added rule chain. - bytes chain_id = 2; - } - - Body body = 1; - - Signature signature = 2; -} - -message RemoveChainLocalOverrideResponse { - message Body {} - - Body body = 1; - - Signature signature = 2; -} - -message RemoveChainLocalOverridesByTargetRequest { - message Body { - // Target for which the overrides are applied. - ChainTarget target = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -message RemoveChainLocalOverridesByTargetResponse { - message Body {} - - Body body = 1; - - Signature signature = 2; -} - -message SealWriteCacheRequest { - // Request body structure. - message Body { - // ID of the shard. - repeated bytes shard_ID = 1; - - // Flag indicating whether object read errors should be ignored. - bool ignore_errors = 2; - - // Flag indicating whether writecache will be sealed async. - bool async = 3; - - // If true, then writecache will be sealed, but mode will be restored to the current one. 
- bool restore_mode = 4; - - // If true, then writecache will shrink internal storage. - bool shrink = 5; - } - - Body body = 1; - Signature signature = 2; -} - -message SealWriteCacheResponse { - message Body { - message Status { - bytes shard_ID = 1; - bool success = 2; - string error = 3; - } - repeated Status results = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -message DetachShardsRequest { - message Body { repeated bytes shard_ID = 1; } - - Body body = 1; - Signature signature = 2; -} - -message DetachShardsResponse { - message Body {} - - Body body = 1; - - Signature signature = 2; -} - -message StartShardRebuildRequest { - message Body { - repeated bytes shard_ID = 1; - uint32 target_fill_percent = 2; - uint32 concurrency_limit = 3; - } - - Body body = 1; - Signature signature = 2; -} - -message StartShardRebuildResponse { - message Body { - message Status { - bytes shard_ID = 1; - bool success = 2; - string error = 3; - } - repeated Status results = 1; - } - - Body body = 1; - - Signature signature = 2; -} - -message ListShardsForObjectRequest { - message Body { - string object_id = 1; - string container_id = 2; - } - - Body body = 1; - Signature signature = 2; -} - -message ListShardsForObjectResponse { - message Body { - // List of the node's shards storing object. - repeated bytes shard_ID = 1; - } - - Body body = 1; - Signature signature = 2; -} diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go deleted file mode 100644 index 44849d591..000000000 --- a/pkg/services/control/service_frostfs.pb.go +++ /dev/null @@ -1,18029 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. - -package control - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - strconv "strconv" -) - -type HealthCheckRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil) - _ json.Marshaler = (*HealthCheckRequest_Body)(nil) - _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckRequest struct { - Body *HealthCheckRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil) - _ json.Marshaler = (*HealthCheckRequest)(nil) - _ json.Unmarshaler = (*HealthCheckRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthCheckRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
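MarshalProtobuf and UnmarshalProtobuf above form the binary round trip for these types; MarshalProtobuf appends to dst, so passing nil allocates a fresh buffer. A minimal sketch (illustrative, not from the deleted sources):

// roundTrip serializes a request and parses it back, as a sanity-check sketch.
func roundTrip(req *HealthCheckRequest) (*HealthCheckRequest, error) {
	data := req.MarshalProtobuf(nil) // wire-format bytes of the whole message

	out := new(HealthCheckRequest)
	if err := out.UnmarshalProtobuf(data); err != nil {
		return nil, err
	}
	return out, nil
}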
-func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthCheckRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) { - x.Body = v -} -func (x *HealthCheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthCheckRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthCheckRequest_Body - f = new(HealthCheckRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckResponse_Body struct { - NetmapStatus NetmapStatus `json:"netmapStatus"` - HealthStatus HealthStatus `json:"healthStatus"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil) - _ json.Marshaler = (*HealthCheckResponse_Body)(nil) - _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *HealthCheckResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.NetmapStatus)) - size += proto.EnumSize(2, int32(x.HealthStatus)) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.NetmapStatus) != 0 { - mm.AppendInt32(1, int32(x.NetmapStatus)) - } - if int32(x.HealthStatus) != 0 { - mm.AppendInt32(2, int32(x.HealthStatus)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body") - } - switch fc.FieldNum { - case 1: // NetmapStatus - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NetmapStatus") - } - x.NetmapStatus = NetmapStatus(data) - case 2: // HealthStatus - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "HealthStatus") - } - x.HealthStatus = HealthStatus(data) - } - } - return nil -} -func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus { - if x != nil { - return x.NetmapStatus - } - return 0 -} -func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) { - x.NetmapStatus = v -} -func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus { - if x != nil { - return x.HealthStatus - } - return 0 -} -func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) { - x.HealthStatus = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"netmapStatus\":" - out.RawString(prefix) - v := int32(x.NetmapStatus) - if vv, ok := NetmapStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"healthStatus\":" - out.RawString(prefix) - v := int32(x.HealthStatus) - if vv, ok := HealthStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
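The easyjson decoders in this file accept enum fields either as protobuf enum names or as raw numbers, as the switch over in.Interface() below shows. A hedged sketch (the enum member names come from types.proto, which is outside this hunk, so treat them as assumptions):

// decodeEnumForms is illustrative: both documents should decode to the same body,
// assuming ONLINE maps to 1 in NetmapStatus_value.
func decodeEnumForms() (a, b HealthCheckResponse_Body, err error) {
	if err = a.UnmarshalJSON([]byte(`{"netmapStatus":"ONLINE","healthStatus":1}`)); err != nil {
		return
	}
	err = b.UnmarshalJSON([]byte(`{"netmapStatus":1,"healthStatus":1}`))
	return // a and b now carry identical enum values
}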
-func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "netmapStatus": - { - var f NetmapStatus - var parsedValue NetmapStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := NetmapStatus_value[v]; ok { - parsedValue = NetmapStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = NetmapStatus(vv) - case float64: - parsedValue = NetmapStatus(v) - } - f = parsedValue - x.NetmapStatus = f - } - case "healthStatus": - { - var f HealthStatus - var parsedValue HealthStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := HealthStatus_value[v]; ok { - parsedValue = HealthStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = HealthStatus(vv) - case float64: - parsedValue = HealthStatus(v) - } - f = parsedValue - x.HealthStatus = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthCheckResponse struct { - Body *HealthCheckResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil) - _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil) - _ json.Marshaler = (*HealthCheckResponse)(nil) - _ json.Unmarshaler = (*HealthCheckResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthCheckResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthCheckResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthCheckResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { - x.Body = v -} -func (x *HealthCheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthCheckResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthCheckResponse_Body - f = new(HealthCheckResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetNetmapStatusRequest_Body struct { - Status NetmapStatus `json:"status"` - ForceMaintenance bool `json:"forceMaintenance"` -} - -var ( - _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest_Body)(nil) - _ json.Marshaler = (*SetNetmapStatusRequest_Body)(nil) - _ json.Unmarshaler = (*SetNetmapStatusRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
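A HealthCheck handler (outside this hunk) would bridge the HealthChecker interface from the server package earlier in this diff into the response body above. A hedged sketch using only the setters shown here:

// fillHealthCheckResponse is illustrative, not part of the deleted sources.
func fillHealthCheckResponse(resp *HealthCheckResponse, netmapSt NetmapStatus, healthSt HealthStatus) {
	body := new(HealthCheckResponse_Body)
	body.SetNetmapStatus(netmapSt) // value produced by HealthChecker.NetmapStatus()
	body.SetHealthStatus(healthSt) // value produced by HealthChecker.HealthStatus()
	resp.SetBody(body)
}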
-func (x *SetNetmapStatusRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.Status)) - size += proto.BoolSize(2, x.ForceMaintenance) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.Status) != 0 { - mm.AppendInt32(1, int32(x.Status)) - } - if x.ForceMaintenance { - mm.AppendBool(2, x.ForceMaintenance) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest_Body") - } - switch fc.FieldNum { - case 1: // Status - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Status") - } - x.Status = NetmapStatus(data) - case 2: // ForceMaintenance - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ForceMaintenance") - } - x.ForceMaintenance = data - } - } - return nil -} -func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus { - if x != nil { - return x.Status - } - return 0 -} -func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) { - x.Status = v -} -func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool { - if x != nil { - return x.ForceMaintenance - } - return false -} -func (x *SetNetmapStatusRequest_Body) SetForceMaintenance(v bool) { - x.ForceMaintenance = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"status\":" - out.RawString(prefix) - v := int32(x.Status) - if vv, ok := NetmapStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"forceMaintenance\":" - out.RawString(prefix) - out.Bool(x.ForceMaintenance) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "status": - { - var f NetmapStatus - var parsedValue NetmapStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := NetmapStatus_value[v]; ok { - parsedValue = NetmapStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = NetmapStatus(vv) - case float64: - parsedValue = NetmapStatus(v) - } - f = parsedValue - x.Status = f - } - case "forceMaintenance": - { - var f bool - f = in.Bool() - x.ForceMaintenance = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetNetmapStatusRequest struct { - Body *SetNetmapStatusRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest)(nil) - _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest)(nil) - _ json.Marshaler = (*SetNetmapStatusRequest)(nil) - _ json.Unmarshaler = (*SetNetmapStatusRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SetNetmapStatusRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *SetNetmapStatusRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *SetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *SetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SetNetmapStatusRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) { - x.Body = v -} -func (x *SetNetmapStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SetNetmapStatusRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetNetmapStatusRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *SetNetmapStatusRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SetNetmapStatusRequest_Body - f = new(SetNetmapStatusRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetNetmapStatusResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse_Body)(nil) - _ json.Marshaler = (*SetNetmapStatusResponse_Body)(nil) - _ json.Unmarshaler = (*SetNetmapStatusResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SetNetmapStatusResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *SetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *SetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetNetmapStatusResponse struct { - Body *SetNetmapStatusResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse)(nil) - _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse)(nil) - _ json.Marshaler = (*SetNetmapStatusResponse)(nil) - _ json.Unmarshaler = (*SetNetmapStatusResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SetNetmapStatusResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *SetNetmapStatusResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *SetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *SetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SetNetmapStatusResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) { - x.Body = v -} -func (x *SetNetmapStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SetNetmapStatusResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetNetmapStatusResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
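ReadSignedData and SignedDataSize above define exactly what a control-message signature covers: the protobuf encoding of the body alone, never the signature field itself (cf. isValidRequest in sign.go earlier in this diff). A minimal sketch of extracting those bytes (illustrative):

// signedBody returns the exact bytes that signers cover and verifiers hash;
// the buffer is pre-sized with SignedDataSize to avoid reallocation.
func signedBody(resp *SetNetmapStatusResponse) ([]byte, error) {
	buf := make([]byte, 0, resp.SignedDataSize())
	return resp.ReadSignedData(buf) // appends the body's wire form into buf
}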
-func (x *SetNetmapStatusResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SetNetmapStatusResponse_Body - f = new(SetNetmapStatusResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNetmapStatusRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest_Body)(nil) - _ json.Marshaler = (*GetNetmapStatusRequest_Body)(nil) - _ json.Unmarshaler = (*GetNetmapStatusRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNetmapStatusRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNetmapStatusRequest struct { - Body *GetNetmapStatusRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest)(nil) - _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest)(nil) - _ json.Marshaler = (*GetNetmapStatusRequest)(nil) - _ json.Unmarshaler = (*GetNetmapStatusRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNetmapStatusRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetNetmapStatusRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetNetmapStatusRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetNetmapStatusRequest) SetBody(v *GetNetmapStatusRequest_Body) { - x.Body = v -} -func (x *GetNetmapStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetNetmapStatusRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNetmapStatusRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetNetmapStatusRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetNetmapStatusRequest_Body - f = new(GetNetmapStatusRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNetmapStatusResponse_Body struct { - Status NetmapStatus `json:"status"` - Epoch uint64 `json:"epoch"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse_Body)(nil) - _ json.Marshaler = (*GetNetmapStatusResponse_Body)(nil) - _ json.Unmarshaler = (*GetNetmapStatusResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
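Taken together, MarshalProtobuf and UnmarshalProtobuf above form a stable, pool-backed binary codec: StableSize reports exactly how many bytes marshaling will produce, and equal field values always yield identical bytes. A minimal round-trip sketch, again under an assumed import path:

// Illustration only; the import path is assumed.
package main

import (
	"bytes"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	req := new(control.GetNetmapStatusRequest)
	req.SetBody(new(control.GetNetmapStatusRequest_Body))

	// MarshalProtobuf appends to dst; nil lets the pooled marshaler allocate.
	raw := req.MarshalProtobuf(nil)
	if len(raw) != req.StableSize() {
		panic("StableSize must match the marshaled length")
	}

	var decoded control.GetNetmapStatusRequest
	if err := decoded.UnmarshalProtobuf(raw); err != nil {
		panic(err)
	}

	// Stable encoding: the decoded copy re-marshals to identical bytes.
	if !bytes.Equal(raw, decoded.MarshalProtobuf(nil)) {
		panic("encoding is not stable")
	}
	fmt.Printf("round-tripped %d bytes\n", len(raw))
}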
-func (x *GetNetmapStatusResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.Status)) - size += proto.UInt64Size(2, x.Epoch) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.Status) != 0 { - mm.AppendInt32(1, int32(x.Status)) - } - if x.Epoch != 0 { - mm.AppendUint64(2, x.Epoch) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse_Body") - } - switch fc.FieldNum { - case 1: // Status - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Status") - } - x.Status = NetmapStatus(data) - case 2: // Epoch - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Epoch") - } - x.Epoch = data - } - } - return nil -} -func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus { - if x != nil { - return x.Status - } - return 0 -} -func (x *GetNetmapStatusResponse_Body) SetStatus(v NetmapStatus) { - x.Status = v -} -func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 { - if x != nil { - return x.Epoch - } - return 0 -} -func (x *GetNetmapStatusResponse_Body) SetEpoch(v uint64) { - x.Epoch = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"status\":" - out.RawString(prefix) - v := int32(x.Status) - if vv, ok := NetmapStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"epoch\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
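Two JSON conventions in the body marshaler above are worth noting: enum fields are written as their symbolic names when the value appears in NetmapStatus_name (falling back to the raw number otherwise), and the uint64 epoch is written as a quoted string so 64-bit values survive consumers that parse JSON numbers as float64. A short sketch; the import path is assumed, and the concrete NetmapStatus constants live elsewhere in the package:

// Illustration only; import path assumed.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.GetNetmapStatusResponse_Body)
	body.SetStatus(control.NetmapStatus(1)) // a named constant in the real package
	body.SetEpoch(42)

	js, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// Prints something like {"status":"<name of value 1>","epoch":"42"};
	// note the quoted epoch.
	fmt.Println(string(js))
}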
-func (x *GetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "status": - { - var f NetmapStatus - var parsedValue NetmapStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := NetmapStatus_value[v]; ok { - parsedValue = NetmapStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = NetmapStatus(vv) - case float64: - parsedValue = NetmapStatus(v) - } - f = parsedValue - x.Status = f - } - case "epoch": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.Epoch = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNetmapStatusResponse struct { - Body *GetNetmapStatusResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse)(nil) - _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse)(nil) - _ json.Marshaler = (*GetNetmapStatusResponse)(nil) - _ json.Unmarshaler = (*GetNetmapStatusResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNetmapStatusResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetNetmapStatusResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
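SignedDataSize and ReadSignedData above pin down what a control-plane signature covers: only the re-marshaled body, never the signature field itself. The sketch below shows attaching a signature; the sign helper is a placeholder for the caller's real key mechanism, and the import path is assumed:

// Illustration only; import path and sign helper are assumptions.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

// sign stands in for whatever signing scheme the caller uses;
// it is not part of the generated API.
func sign(data []byte) *control.Signature {
	_ = data
	return new(control.Signature)
}

func main() {
	resp := new(control.GetNetmapStatusResponse)
	body := new(control.GetNetmapStatusResponse_Body)
	body.SetEpoch(42)
	resp.SetBody(body)

	// Pre-size the buffer with SignedDataSize; ReadSignedData re-marshals
	// the body into it, allocating only if the buffer is too small.
	data, err := resp.ReadSignedData(make([]byte, 0, resp.SignedDataSize()))
	if err != nil {
		panic(err)
	}
	resp.SetSignature(sign(data))
	fmt.Printf("signed %d body bytes\n", len(data))
}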
-func (x *GetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetNetmapStatusResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetNetmapStatusResponse) SetBody(v *GetNetmapStatusResponse_Body) { - x.Body = v -} -func (x *GetNetmapStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetNetmapStatusResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNetmapStatusResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetNetmapStatusResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetNetmapStatusResponse_Body - f = new(GetNetmapStatusResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DropObjectsRequest_Body struct { - AddressList [][]byte `json:"addressList"` -} - -var ( - _ encoding.ProtoMarshaler = (*DropObjectsRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DropObjectsRequest_Body)(nil) - _ json.Marshaler = (*DropObjectsRequest_Body)(nil) - _ json.Unmarshaler = (*DropObjectsRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
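The getters generated above all tolerate nil receivers, which is why call chains like x.GetBody().GetEpoch() are safe on messages that were never populated; the marshalers short-circuit the same way. A two-line illustration under the same assumed import path:

// Illustration only; import path assumed.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	var resp *control.GetNetmapStatusResponse // deliberately nil

	// Every getter checks its receiver, so the chain degrades to zero values.
	fmt.Println(resp.GetBody().GetEpoch()) // 0

	// EmitProtobuf returns early on nil, so marshaling yields no bytes.
	fmt.Println(len(resp.MarshalProtobuf(nil))) // 0
}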
-func (x *DropObjectsRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.AddressList) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DropObjectsRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DropObjectsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.AddressList { - mm.AppendBytes(1, x.AddressList[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DropObjectsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest_Body") - } - switch fc.FieldNum { - case 1: // AddressList - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "AddressList") - } - x.AddressList = append(x.AddressList, data) - } - } - return nil -} -func (x *DropObjectsRequest_Body) GetAddressList() [][]byte { - if x != nil { - return x.AddressList - } - return nil -} -func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) { - x.AddressList = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DropObjectsRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"addressList\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.AddressList { - if i != 0 { - out.RawByte(',') - } - if x.AddressList[i] != nil { - out.Base64Bytes(x.AddressList[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DropObjectsRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "addressList": - { - var f []byte - var list [][]byte - in.Delim('[') - for !in.IsDelim(']') { - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - list = append(list, f) - in.WantComma() - } - x.AddressList = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DropObjectsRequest struct { - Body *DropObjectsRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*DropObjectsRequest)(nil) - _ encoding.ProtoUnmarshaler = (*DropObjectsRequest)(nil) - _ json.Marshaler = (*DropObjectsRequest)(nil) - _ json.Unmarshaler = (*DropObjectsRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. 
-// -// Structures with the same field values have the same binary size. -func (x *DropObjectsRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *DropObjectsRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *DropObjectsRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DropObjectsRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DropObjectsRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DropObjectsRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) { - x.Body = v -} -func (x *DropObjectsRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DropObjectsRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. 
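DropObjectsRequest carries its targets as raw address encodings in a repeated bytes field, which the JSON marshaler above renders as an array of base64 strings. A construction sketch; the import path is assumed, and real callers would put marshaled object addresses in the list rather than placeholder bytes:

// Illustration only; import path assumed, payloads are placeholders.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.DropObjectsRequest_Body)
	// In practice these entries are binary object addresses.
	body.SetAddressList([][]byte{
		[]byte("address-1"),
		[]byte("address-2"),
	})

	req := new(control.DropObjectsRequest)
	req.SetBody(body)

	js, err := req.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // addressList appears as base64-encoded strings
}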
-func (x *DropObjectsRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DropObjectsRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DropObjectsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DropObjectsRequest_Body - f = new(DropObjectsRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DropObjectsResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*DropObjectsResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DropObjectsResponse_Body)(nil) - _ json.Marshaler = (*DropObjectsResponse_Body)(nil) - _ json.Unmarshaler = (*DropObjectsResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DropObjectsResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DropObjectsResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DropObjectsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DropObjectsResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DropObjectsResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DropObjectsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *DropObjectsResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DropObjectsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DropObjectsResponse struct { - Body *DropObjectsResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*DropObjectsResponse)(nil) - _ encoding.ProtoUnmarshaler = (*DropObjectsResponse)(nil) - _ json.Marshaler = (*DropObjectsResponse)(nil) - _ json.Unmarshaler = (*DropObjectsResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DropObjectsResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *DropObjectsResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *DropObjectsResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DropObjectsResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *DropObjectsResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DropObjectsResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) { - x.Body = v -} -func (x *DropObjectsResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DropObjectsResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DropObjectsResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DropObjectsResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DropObjectsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DropObjectsResponse_Body - f = new(DropObjectsResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsRequest_Body)(nil) - _ json.Marshaler = (*ListShardsRequest_Body)(nil) - _ json.Unmarshaler = (*ListShardsRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
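Several control messages, like ListShardsRequest_Body above, have no fields at all; the generated code still gives them the full codec surface, with a zero StableSize, an empty wire encoding, and {} as their JSON form. A quick check, import path assumed as before:

// Illustration only; import path assumed.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.ListShardsRequest_Body)

	fmt.Println(body.StableSize())              // 0: nothing to encode
	fmt.Println(len(body.MarshalProtobuf(nil))) // 0: empty wire form
	js, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // {}
}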
-func (x *ListShardsRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListShardsRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsRequest struct { - Body *ListShardsRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsRequest)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsRequest)(nil) - _ json.Marshaler = (*ListShardsRequest)(nil) - _ json.Unmarshaler = (*ListShardsRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ListShardsRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ListShardsRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *ListShardsRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) { - x.Body = v -} -func (x *ListShardsRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
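Every request and response in this file follows the same envelope: a body under field 1 that the signature covers, and the signature under field 2, each with identical SignedDataSize/ReadSignedData/SetSignature plumbing. That uniformity admits a small generic helper; the interface, helper, and placeholder signer below are assumptions for illustration, not generated API:

// Illustration only; signedMessage and attachSignature are editor-defined.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

// signedMessage captures the envelope shared by the generated types.
type signedMessage interface {
	SignedDataSize() int
	ReadSignedData([]byte) ([]byte, error)
	SetSignature(*control.Signature)
}

// attachSignature re-marshals the body and installs a signature over it.
func attachSignature(m signedMessage, sign func([]byte) *control.Signature) error {
	data, err := m.ReadSignedData(make([]byte, 0, m.SignedDataSize()))
	if err != nil {
		return err
	}
	m.SetSignature(sign(data))
	return nil
}

func main() {
	req := new(control.ListShardsRequest)
	req.SetBody(new(control.ListShardsRequest_Body))

	// Placeholder signer; real callers plug in their key material here.
	err := attachSignature(req, func([]byte) *control.Signature {
		return new(control.Signature)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("request signed")
}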
-func (x *ListShardsRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsRequest_Body - f = new(ListShardsRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsResponse_Body struct { - Shards []ShardInfo `json:"shards"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsResponse_Body)(nil) - _ json.Marshaler = (*ListShardsResponse_Body)(nil) - _ json.Unmarshaler = (*ListShardsResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.Shards { - size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for i := range x.Shards { - x.Shards[i].EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsResponse_Body") - } - switch fc.FieldNum { - case 1: // Shards - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shards") - } - x.Shards = append(x.Shards, ShardInfo{}) - ff := &x.Shards[len(x.Shards)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsResponse_Body) GetShards() []ShardInfo { - if x != nil { - return x.Shards - } - return nil -} -func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) { - x.Shards = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *ListShardsResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shards\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shards { - if i != 0 { - out.RawByte(',') - } - x.Shards[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListShardsResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shards": - { - var f ShardInfo - var list []ShardInfo - in.Delim('[') - for !in.IsDelim(']') { - f = ShardInfo{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Shards = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsResponse struct { - Body *ListShardsResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsResponse)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsResponse)(nil) - _ json.Marshaler = (*ListShardsResponse)(nil) - _ json.Unmarshaler = (*ListShardsResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ListShardsResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ListShardsResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
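On the client side, a ListShardsResponse decoded from the wire is consumed through the nil-safe getter chain; the shards live in the body as a value slice. A decoding sketch; the import path is assumed, and ShardInfo's own fields are defined elsewhere in the package:

// Illustration only; import path assumed.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

// printShards decodes a marshaled ListShardsResponse and walks its shard list.
func printShards(raw []byte) error {
	var resp control.ListShardsResponse
	if err := resp.UnmarshalProtobuf(raw); err != nil {
		return err
	}
	// GetBody/GetShards are nil-safe, so this loop is a no-op for empty input.
	for i := range resp.GetBody().GetShards() {
		fmt.Printf("shard #%d\n", i)
	}
	return nil
}

func main() {
	if err := printShards(nil); err != nil { // empty input decodes cleanly
		panic(err)
	}
}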
-func (x *ListShardsResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) { - x.Body = v -} -func (x *ListShardsResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListShardsResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsResponse_Body - f = new(ListShardsResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetShardModeRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - Mode ShardMode `json:"mode"` - ResetErrorCounter bool `json:"resetErrorCounter"` -} - -var ( - _ encoding.ProtoMarshaler = (*SetShardModeRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SetShardModeRequest_Body)(nil) - _ json.Marshaler = (*SetShardModeRequest_Body)(nil) - _ json.Unmarshaler = (*SetShardModeRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *SetShardModeRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.EnumSize(2, int32(x.Mode)) - size += proto.BoolSize(3, x.ResetErrorCounter) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SetShardModeRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetShardModeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if int32(x.Mode) != 0 { - mm.AppendInt32(2, int32(x.Mode)) - } - if x.ResetErrorCounter { - mm.AppendBool(3, x.ResetErrorCounter) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SetShardModeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // Mode - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Mode") - } - x.Mode = ShardMode(data) - case 3: // ResetErrorCounter - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ResetErrorCounter") - } - x.ResetErrorCounter = data - } - } - return nil -} -func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *SetShardModeRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *SetShardModeRequest_Body) GetMode() ShardMode { - if x != nil { - return x.Mode - } - return 0 -} -func (x *SetShardModeRequest_Body) SetMode(v ShardMode) { - x.Mode = v -} -func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool { - if x != nil { - return x.ResetErrorCounter - } - return false -} -func (x *SetShardModeRequest_Body) SetResetErrorCounter(v bool) { - x.ResetErrorCounter = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetShardModeRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"mode\":" - out.RawString(prefix) - v := int32(x.Mode) - if vv, ok := ShardMode_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"resetErrorCounter\":" - out.RawString(prefix) - out.Bool(x.ResetErrorCounter) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
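SetShardModeRequest_Body above targets one or more shards by ID, selects the new mode, and can clear the shard error counter in the same call. A construction sketch; the import path is assumed, and the concrete ShardMode constants are defined elsewhere in the package:

// Illustration only; import path assumed, shard ID is a placeholder.
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.SetShardModeRequest_Body)
	body.SetShard_ID([][]byte{[]byte("shard-id-placeholder")}) // one entry per target shard
	body.SetMode(control.ShardMode(1))                         // a named constant in the real package
	body.SetResetErrorCounter(true)                            // also wipe the shard's error counter

	req := new(control.SetShardModeRequest)
	req.SetBody(body)

	fmt.Printf("request body encodes to %d bytes\n", body.StableSize())
}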
-func (x *SetShardModeRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - var list [][]byte - in.Delim('[') - for !in.IsDelim(']') { - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - list = append(list, f) - in.WantComma() - } - x.Shard_ID = list - in.Delim(']') - } - case "mode": - { - var f ShardMode - var parsedValue ShardMode - switch v := in.Interface().(type) { - case string: - if vv, ok := ShardMode_value[v]; ok { - parsedValue = ShardMode(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = ShardMode(vv) - case float64: - parsedValue = ShardMode(v) - } - f = parsedValue - x.Mode = f - } - case "resetErrorCounter": - { - var f bool - f = in.Bool() - x.ResetErrorCounter = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetShardModeRequest struct { - Body *SetShardModeRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*SetShardModeRequest)(nil) - _ encoding.ProtoUnmarshaler = (*SetShardModeRequest)(nil) - _ json.Marshaler = (*SetShardModeRequest)(nil) - _ json.Unmarshaler = (*SetShardModeRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SetShardModeRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *SetShardModeRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *SetShardModeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SetShardModeRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *SetShardModeRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SetShardModeRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) { - x.Body = v -} -func (x *SetShardModeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SetShardModeRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetShardModeRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *SetShardModeRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetShardModeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SetShardModeRequest_Body - f = new(SetShardModeRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SetShardModeResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*SetShardModeResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SetShardModeResponse_Body)(nil) - _ json.Marshaler = (*SetShardModeResponse_Body)(nil) - _ json.Unmarshaler = (*SetShardModeResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SetShardModeResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *SetShardModeResponse_Body) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *SetShardModeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetShardModeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse_Body")
-		}
-		switch fc.FieldNum {
-		}
-	}
-	return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetShardModeResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetShardModeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	out.RawByte('{')
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetShardModeResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *SetShardModeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type SetShardModeResponse struct {
-	Body      *SetShardModeResponse_Body `json:"body"`
-	Signature *Signature                 `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*SetShardModeResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*SetShardModeResponse)(nil)
-	_ json.Marshaler            = (*SetShardModeResponse)(nil)
-	_ json.Unmarshaler          = (*SetShardModeResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SetShardModeResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SetShardModeResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SetShardModeResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetShardModeResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SetShardModeResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SetShardModeResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) { - x.Body = v -} -func (x *SetShardModeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SetShardModeResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SetShardModeResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SetShardModeResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SetShardModeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SetShardModeResponse_Body - f = new(SetShardModeResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SynchronizeTreeRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - Height uint64 `json:"height"` -} - -var ( - _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest_Body)(nil) - _ json.Marshaler = (*SynchronizeTreeRequest_Body)(nil) - _ json.Unmarshaler = (*SynchronizeTreeRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SynchronizeTreeRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.UInt64Size(3, x.Height) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SynchronizeTreeRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SynchronizeTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.Height != 0 { - mm.AppendUint64(3, x.Height) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *SynchronizeTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // Height - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Height") - } - x.Height = data - } - } - return nil -} -func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *SynchronizeTreeRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *SynchronizeTreeRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *SynchronizeTreeRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 { - if x != nil { - return x.Height - } - return 0 -} -func (x *SynchronizeTreeRequest_Body) SetHeight(v uint64) { - x.Height = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SynchronizeTreeRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"height\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SynchronizeTreeRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "containerId":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.ContainerId = f
-			}
-		case "treeId":
-			{
-				var f string
-				f = in.String()
-				x.TreeId = f
-			}
-		case "height":
-			{
-				var f uint64
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 64)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint64(v)
-				f = pv
-				x.Height = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type SynchronizeTreeRequest struct {
-	Body      *SynchronizeTreeRequest_Body `json:"body"`
-	Signature *Signature                   `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*SynchronizeTreeRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest)(nil)
-	_ json.Marshaler            = (*SynchronizeTreeRequest)(nil)
-	_ json.Unmarshaler          = (*SynchronizeTreeRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SynchronizeTreeRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SynchronizeTreeRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SynchronizeTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
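Note the JSON conventions visible in the body's marshaler and unmarshaler: byte slices such as containerId are written as base64 strings, and the uint64 height is wrapped in quotes so 64-bit values survive consumers that parse JSON numbers as float64, in line with protojson's handling of 64-bit integers. A small sketch, with the import path assumed:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed import path
)

func main() {
	b := new(control.SynchronizeTreeRequest_Body)
	b.SetContainerId([]byte{0x01, 0x02})
	b.SetTreeId("tree-1")
	b.SetHeight(42)

	js, err := b.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// Expected shape (containerId base64-encoded, height a quoted decimal):
	// {"containerId":"AQI=","treeId":"tree-1","height":"42"}
	fmt.Println(string(js))
}

-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.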
-func (x *SynchronizeTreeRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SynchronizeTreeRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) { - x.Body = v -} -func (x *SynchronizeTreeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SynchronizeTreeRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SynchronizeTreeRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *SynchronizeTreeRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SynchronizeTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SynchronizeTreeRequest_Body - f = new(SynchronizeTreeRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SynchronizeTreeResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse_Body)(nil) - _ json.Marshaler = (*SynchronizeTreeResponse_Body)(nil) - _ json.Unmarshaler = (*SynchronizeTreeResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SynchronizeTreeResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *SynchronizeTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *SynchronizeTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SynchronizeTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse_Body")
-		}
-		switch fc.FieldNum {
-		}
-	}
-	return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SynchronizeTreeResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SynchronizeTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	out.RawByte('{')
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SynchronizeTreeResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *SynchronizeTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type SynchronizeTreeResponse struct {
-	Body      *SynchronizeTreeResponse_Body `json:"body"`
-	Signature *Signature                    `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*SynchronizeTreeResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse)(nil)
-	_ json.Marshaler            = (*SynchronizeTreeResponse)(nil)
-	_ json.Unmarshaler          = (*SynchronizeTreeResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SynchronizeTreeResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SynchronizeTreeResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SynchronizeTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SynchronizeTreeResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SynchronizeTreeResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) { - x.Body = v -} -func (x *SynchronizeTreeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SynchronizeTreeResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SynchronizeTreeResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SynchronizeTreeResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SynchronizeTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SynchronizeTreeResponse_Body - f = new(SynchronizeTreeResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type EvacuateShardRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - IgnoreErrors bool `json:"ignoreErrors"` -} - -var ( - _ encoding.ProtoMarshaler = (*EvacuateShardRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest_Body)(nil) - _ json.Marshaler = (*EvacuateShardRequest_Body)(nil) - _ json.Unmarshaler = (*EvacuateShardRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *EvacuateShardRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.IgnoreErrors) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *EvacuateShardRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *EvacuateShardRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if x.IgnoreErrors { - mm.AppendBool(2, x.IgnoreErrors) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *EvacuateShardRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // IgnoreErrors - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") - } - x.IgnoreErrors = data - } - } - return nil -} -func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *EvacuateShardRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool { - if x != nil { - return x.IgnoreErrors - } - return false -} -func (x *EvacuateShardRequest_Body) SetIgnoreErrors(v bool) { - x.IgnoreErrors = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *EvacuateShardRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"shardID\":"
-		out.RawString(prefix)
-		out.RawByte('[')
-		for i := range x.Shard_ID {
-			if i != 0 {
-				out.RawByte(',')
-			}
-			if x.Shard_ID[i] != nil {
-				out.Base64Bytes(x.Shard_ID[i])
-			} else {
-				out.String("")
-			}
-		}
-		out.RawByte(']')
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"ignoreErrors\":"
-		out.RawString(prefix)
-		out.Bool(x.IgnoreErrors)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *EvacuateShardRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		case "ignoreErrors":
-			{
-				var f bool
-				f = in.Bool()
-				x.IgnoreErrors = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type EvacuateShardRequest struct {
-	Body      *EvacuateShardRequest_Body `json:"body"`
-	Signature *Signature                 `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*EvacuateShardRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*EvacuateShardRequest)(nil)
-	_ json.Marshaler            = (*EvacuateShardRequest)(nil)
-	_ json.Unmarshaler          = (*EvacuateShardRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *EvacuateShardRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *EvacuateShardRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *EvacuateShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
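Shard_ID is a repeated bytes field: EmitProtobuf writes one field-1 record per element, and UnmarshalProtobuf appends records back in order, so a round trip preserves both count and order. A sketch under the same import-path assumption:

package main

import (
	"bytes"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed import path
)

func main() {
	in := new(control.EvacuateShardRequest_Body)
	in.SetShard_ID([][]byte{[]byte("shard-a"), []byte("shard-b")})
	in.SetIgnoreErrors(true)

	// Marshal to the stable protobuf form, then decode into a fresh value.
	raw := in.MarshalProtobuf(nil)
	out := new(control.EvacuateShardRequest_Body)
	if err := out.UnmarshalProtobuf(raw); err != nil {
		panic(err)
	}

	fmt.Println(len(out.GetShard_ID()) == 2 &&
		bytes.Equal(out.GetShard_ID()[0], []byte("shard-a")) &&
		out.GetIgnoreErrors())
}

-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.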
-func (x *EvacuateShardRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *EvacuateShardRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(EvacuateShardRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *EvacuateShardRequest) SetBody(v *EvacuateShardRequest_Body) { - x.Body = v -} -func (x *EvacuateShardRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *EvacuateShardRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *EvacuateShardRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *EvacuateShardRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *EvacuateShardRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *EvacuateShardRequest_Body - f = new(EvacuateShardRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type EvacuateShardResponse_Body struct { - Count uint32 `json:"count"` -} - -var ( - _ encoding.ProtoMarshaler = (*EvacuateShardResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse_Body)(nil) - _ json.Marshaler = (*EvacuateShardResponse_Body)(nil) - _ json.Unmarshaler = (*EvacuateShardResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *EvacuateShardResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt32Size(1, x.Count) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *EvacuateShardResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *EvacuateShardResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Count != 0 { - mm.AppendUint32(1, x.Count) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *EvacuateShardResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse_Body") - } - switch fc.FieldNum { - case 1: // Count - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Count") - } - x.Count = data - } - } - return nil -} -func (x *EvacuateShardResponse_Body) GetCount() uint32 { - if x != nil { - return x.Count - } - return 0 -} -func (x *EvacuateShardResponse_Body) SetCount(v uint32) { - x.Count = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *EvacuateShardResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"count\":" - out.RawString(prefix) - out.Uint32(x.Count) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *EvacuateShardResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "count":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.Count = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type EvacuateShardResponse struct {
-	Body      *EvacuateShardResponse_Body `json:"body"`
-	Signature *Signature                  `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*EvacuateShardResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*EvacuateShardResponse)(nil)
-	_ json.Marshaler            = (*EvacuateShardResponse)(nil)
-	_ json.Unmarshaler          = (*EvacuateShardResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *EvacuateShardResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *EvacuateShardResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *EvacuateShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *EvacuateShardResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
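Every generated getter guards against a nil receiver, so response fields can be read through a chain like GetBody().GetCount() without intermediate nil checks. For example (import path assumed):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed import path
)

func main() {
	// A nil response reads as zero values; no panic at any level.
	var resp *control.EvacuateShardResponse
	fmt.Println(resp.GetBody().GetCount()) // 0

	resp = new(control.EvacuateShardResponse)
	body := new(control.EvacuateShardResponse_Body)
	body.SetCount(7)
	resp.SetBody(body)
	fmt.Println(resp.GetBody().GetCount()) // 7
}

-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.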
-func (x *EvacuateShardResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(EvacuateShardResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *EvacuateShardResponse) SetBody(v *EvacuateShardResponse_Body) { - x.Body = v -} -func (x *EvacuateShardResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *EvacuateShardResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *EvacuateShardResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *EvacuateShardResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *EvacuateShardResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *EvacuateShardResponse_Body - f = new(EvacuateShardResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type FlushCacheRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - Seal bool `json:"seal"` -} - -var ( - _ encoding.ProtoMarshaler = (*FlushCacheRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*FlushCacheRequest_Body)(nil) - _ json.Marshaler = (*FlushCacheRequest_Body)(nil) - _ json.Unmarshaler = (*FlushCacheRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *FlushCacheRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.Seal) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *FlushCacheRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *FlushCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if x.Seal { - mm.AppendBool(2, x.Seal) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *FlushCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // Seal - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Seal") - } - x.Seal = data - } - } - return nil -} -func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *FlushCacheRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *FlushCacheRequest_Body) GetSeal() bool { - if x != nil { - return x.Seal - } - return false -} -func (x *FlushCacheRequest_Body) SetSeal(v bool) { - x.Seal = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *FlushCacheRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"seal\":" - out.RawString(prefix) - out.Bool(x.Seal) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *FlushCacheRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		case "seal":
-			{
-				var f bool
-				f = in.Bool()
-				x.Seal = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type FlushCacheRequest struct {
-	Body      *FlushCacheRequest_Body `json:"body"`
-	Signature *Signature              `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*FlushCacheRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*FlushCacheRequest)(nil)
-	_ json.Marshaler            = (*FlushCacheRequest)(nil)
-	_ json.Unmarshaler          = (*FlushCacheRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *FlushCacheRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *FlushCacheRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *FlushCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
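StableSize mirrors the encoder exactly, including the NestedStructureSize accounting for the embedded body and signature messages, so it can be used to pre-size buffers before marshaling. A sketch, assuming the same import path:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // assumed import path
)

func main() {
	body := new(control.FlushCacheRequest_Body)
	body.SetShard_ID([][]byte{[]byte("shard-a")})
	body.SetSeal(true)

	req := new(control.FlushCacheRequest)
	req.SetBody(body)

	// The declared stable size always matches the encoded length.
	fmt.Println(req.StableSize() == len(req.MarshalProtobuf(nil)))
}

-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.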
-func (x *FlushCacheRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(FlushCacheRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *FlushCacheRequest) SetBody(v *FlushCacheRequest_Body) { - x.Body = v -} -func (x *FlushCacheRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *FlushCacheRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *FlushCacheRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *FlushCacheRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *FlushCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *FlushCacheRequest_Body - f = new(FlushCacheRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type FlushCacheResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*FlushCacheResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*FlushCacheResponse_Body)(nil) - _ json.Marshaler = (*FlushCacheResponse_Body)(nil) - _ json.Unmarshaler = (*FlushCacheResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *FlushCacheResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *FlushCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *FlushCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *FlushCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse_Body")
-		}
-		switch fc.FieldNum {
-		}
-	}
-	return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *FlushCacheResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *FlushCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	out.RawByte('{')
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *FlushCacheResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *FlushCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type FlushCacheResponse struct {
-	Body      *FlushCacheResponse_Body `json:"body"`
-	Signature *Signature               `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*FlushCacheResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*FlushCacheResponse)(nil)
-	_ json.Marshaler            = (*FlushCacheResponse)(nil)
-	_ json.Unmarshaler          = (*FlushCacheResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *FlushCacheResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *FlushCacheResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *FlushCacheResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(FlushCacheResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *FlushCacheResponse) SetBody(v *FlushCacheResponse_Body) { - x.Body = v -} -func (x *FlushCacheResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *FlushCacheResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *FlushCacheResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *FlushCacheResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *FlushCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *FlushCacheResponse_Body - f = new(FlushCacheResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DoctorRequest_Body struct { - Concurrency uint32 `json:"concurrency"` - RemoveDuplicates bool `json:"removeDuplicates"` -} - -var ( - _ encoding.ProtoMarshaler = (*DoctorRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DoctorRequest_Body)(nil) - _ json.Marshaler = (*DoctorRequest_Body)(nil) - _ json.Unmarshaler = (*DoctorRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DoctorRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt32Size(1, x.Concurrency) - size += proto.BoolSize(2, x.RemoveDuplicates) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DoctorRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DoctorRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Concurrency != 0 { - mm.AppendUint32(1, x.Concurrency) - } - if x.RemoveDuplicates { - mm.AppendBool(2, x.RemoveDuplicates) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DoctorRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DoctorRequest_Body") - } - switch fc.FieldNum { - case 1: // Concurrency - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Concurrency") - } - x.Concurrency = data - case 2: // RemoveDuplicates - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "RemoveDuplicates") - } - x.RemoveDuplicates = data - } - } - return nil -} -func (x *DoctorRequest_Body) GetConcurrency() uint32 { - if x != nil { - return x.Concurrency - } - return 0 -} -func (x *DoctorRequest_Body) SetConcurrency(v uint32) { - x.Concurrency = v -} -func (x *DoctorRequest_Body) GetRemoveDuplicates() bool { - if x != nil { - return x.RemoveDuplicates - } - return false -} -func (x *DoctorRequest_Body) SetRemoveDuplicates(v bool) { - x.RemoveDuplicates = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *DoctorRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"concurrency\":" - out.RawString(prefix) - out.Uint32(x.Concurrency) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"removeDuplicates\":" - out.RawString(prefix) - out.Bool(x.RemoveDuplicates) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DoctorRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "concurrency": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.Concurrency = f - } - case "removeDuplicates": - { - var f bool - f = in.Bool() - x.RemoveDuplicates = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DoctorRequest struct { - Body *DoctorRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*DoctorRequest)(nil) - _ encoding.ProtoUnmarshaler = (*DoctorRequest)(nil) - _ json.Marshaler = (*DoctorRequest)(nil) - _ json.Unmarshaler = (*DoctorRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DoctorRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *DoctorRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
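
DoctorRequest above carries the SignedDataSize/ReadSignedData pair that every signable message in this file exposes; both delegate to GetBody(), so a signature covers only the marshaled body and the Signature field can be attached afterwards without invalidating the payload. (Note the generator emitted the doc comments of these two methods swapped: the "ReadSignedData fills buf..." text sits on SignedDataSize and vice versa; the behavior is as the code reads.) A hedged sketch of a caller; SignMessage and the signer callback are hypothetical stand-ins, not the repository's actual signing API:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// signedMessage matches the method pair generated for each
// request/response in this file (e.g. DoctorRequest).
type signedMessage interface {
	SignedDataSize() int
	ReadSignedData([]byte) ([]byte, error)
}

// SignMessage is a hypothetical helper: size a buffer from
// SignedDataSize, let ReadSignedData fill it with the marshaled body,
// then hand the payload to an arbitrary signer callback.
func SignMessage(m signedMessage, sign func(payload []byte) ([]byte, error)) ([]byte, error) {
	buf := make([]byte, 0, m.SignedDataSize())
	payload, err := m.ReadSignedData(buf)
	if err != nil {
		return nil, fmt.Errorf("read signed data: %w", err)
	}
	return sign(payload)
}

// fakeMsg keeps the sketch self-contained; real code would pass a
// DoctorRequest and an ECDSA signer instead.
type fakeMsg struct{ body []byte }

func (f fakeMsg) SignedDataSize() int { return len(f.body) }
func (f fakeMsg) ReadSignedData(buf []byte) ([]byte, error) {
	return append(buf[:0], f.body...), nil
}

func main() {
	m := fakeMsg{body: []byte("body-bytes")}
	// A SHA-256 digest stands in for a real signature here.
	sig, err := SignMessage(m, func(p []byte) ([]byte, error) {
		sum := sha256.Sum256(p)
		return sum[:], nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %x\n", sig)
}
```
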
-func (x *DoctorRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DoctorRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DoctorRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DoctorRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DoctorRequest) GetBody() *DoctorRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DoctorRequest) SetBody(v *DoctorRequest_Body) { - x.Body = v -} -func (x *DoctorRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DoctorRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DoctorRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *DoctorRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DoctorRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DoctorRequest_Body - f = new(DoctorRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DoctorResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*DoctorResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DoctorResponse_Body)(nil) - _ json.Marshaler = (*DoctorResponse_Body)(nil) - _ json.Unmarshaler = (*DoctorResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DoctorResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DoctorResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DoctorResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DoctorResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DoctorResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DoctorResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DoctorResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DoctorResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DoctorResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DoctorResponse struct { - Body *DoctorResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*DoctorResponse)(nil) - _ encoding.ProtoUnmarshaler = (*DoctorResponse)(nil) - _ json.Marshaler = (*DoctorResponse)(nil) - _ json.Unmarshaler = (*DoctorResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
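
The JSON decoder above tolerates null for any key and leaves the field nil, but its key switch has no default branch with SkipRecursive, so an unknown key with a non-null value would desynchronize the lexer. A round trip through the generated MarshalJSON/UnmarshalJSON; the import path is an assumption about where this deleted file lived, and the byte output in the comment follows from the marshaling code shown above:

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the package this deleted file belongs to.
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.DoctorRequest_Body)
	body.SetConcurrency(4)
	body.SetRemoveDuplicates(true)

	req := new(control.DoctorRequest)
	req.SetBody(body)

	// Goes through MarshalEasyJSON: both keys are always written, and the
	// nil Signature serializes as null via the nil-receiver check.
	data, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"body":{"concurrency":4,"removeDuplicates":true},"signature":null}

	var back control.DoctorRequest
	if err := json.Unmarshal(data, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.GetBody().GetConcurrency(), back.GetSignature() == nil) // 4 true
}
```
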
-func (x *DoctorResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *DoctorResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DoctorResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DoctorResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DoctorResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DoctorResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DoctorResponse) GetBody() *DoctorResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DoctorResponse) SetBody(v *DoctorResponse_Body) { - x.Body = v -} -func (x *DoctorResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DoctorResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DoctorResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *DoctorResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DoctorResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DoctorResponse_Body - f = new(DoctorResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardEvacuationRequest_Body_Scope int32 - -const ( - StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0 - StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1 - StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2 -) - -var ( - StartShardEvacuationRequest_Body_Scope_name = map[int32]string{ - 0: "NONE", - 1: "OBJECTS", - 2: "TREES", - } - StartShardEvacuationRequest_Body_Scope_value = map[string]int32{ - "NONE": 0, - "OBJECTS": 1, - "TREES": 2, - } -) - -func (x StartShardEvacuationRequest_Body_Scope) String() string { - if v, ok := StartShardEvacuationRequest_Body_Scope_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool { - if v, ok := StartShardEvacuationRequest_Body_Scope_value[s]; ok { - *x = StartShardEvacuationRequest_Body_Scope(v) - return true - } - return false -} - -type StartShardEvacuationRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - IgnoreErrors bool `json:"ignoreErrors"` - Scope uint32 `json:"scope"` - ContainerWorkerCount uint32 `json:"containerWorkerCount"` - ObjectWorkerCount uint32 `json:"objectWorkerCount"` - RepOneOnly bool `json:"repOneOnly"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest_Body)(nil) - _ json.Marshaler = (*StartShardEvacuationRequest_Body)(nil) - _ json.Unmarshaler = (*StartShardEvacuationRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardEvacuationRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.IgnoreErrors) - size += proto.UInt32Size(3, x.Scope) - size += proto.UInt32Size(4, x.ContainerWorkerCount) - size += proto.UInt32Size(5, x.ObjectWorkerCount) - size += proto.BoolSize(6, x.RepOneOnly) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
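
StartShardEvacuationRequest_Body stores Scope as a raw uint32 while the Scope enum supplies the name mapping; String falls back to the decimal value for unknown numbers, and FromString reports false rather than returning an error. A small sketch under the same assumed import path; combining scope values with OR is an assumption about intended semantics, not something this file states:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	var scope control.StartShardEvacuationRequest_Body_Scope
	if !scope.FromString("OBJECTS") {
		panic("unknown scope name")
	}
	fmt.Println(scope.String()) // OBJECTS

	body := new(control.StartShardEvacuationRequest_Body)
	// The field is a plain uint32; OR-ing OBJECTS|TREES here is an
	// assumption, since the enum and the field are not tied together.
	body.SetScope(uint32(control.StartShardEvacuationRequest_Body_OBJECTS) |
		uint32(control.StartShardEvacuationRequest_Body_TREES))
	fmt.Println(body.GetScope()) // 3
}
```
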
-func (x *StartShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if x.IgnoreErrors { - mm.AppendBool(2, x.IgnoreErrors) - } - if x.Scope != 0 { - mm.AppendUint32(3, x.Scope) - } - if x.ContainerWorkerCount != 0 { - mm.AppendUint32(4, x.ContainerWorkerCount) - } - if x.ObjectWorkerCount != 0 { - mm.AppendUint32(5, x.ObjectWorkerCount) - } - if x.RepOneOnly { - mm.AppendBool(6, x.RepOneOnly) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // IgnoreErrors - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") - } - x.IgnoreErrors = data - case 3: // Scope - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Scope") - } - x.Scope = data - case 4: // ContainerWorkerCount - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount") - } - x.ContainerWorkerCount = data - case 5: // ObjectWorkerCount - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount") - } - x.ObjectWorkerCount = data - case 6: // RepOneOnly - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly") - } - x.RepOneOnly = data - } - } - return nil -} -func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *StartShardEvacuationRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool { - if x != nil { - return x.IgnoreErrors - } - return false -} -func (x *StartShardEvacuationRequest_Body) SetIgnoreErrors(v bool) { - x.IgnoreErrors = v -} -func (x *StartShardEvacuationRequest_Body) GetScope() uint32 { - if x != nil { - return x.Scope - } - return 0 -} -func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) { - x.Scope = v -} -func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 { - if x != nil { - return x.ContainerWorkerCount - } - return 0 -} -func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) { - x.ContainerWorkerCount = v -} -func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 { - if x != nil { - return x.ObjectWorkerCount - } - return 0 -} -func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) { - x.ObjectWorkerCount = v -} -func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool { - if x != nil { - return x.RepOneOnly - } - return false -} -func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) { - x.RepOneOnly = v -} - -// MarshalJSON implements the json.Marshaler interface. 
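
Shard_ID above shows the repeated-bytes pattern: EmitProtobuf writes one field-1 record per element, and the decoder appends every occurrence it meets, so order is preserved and an absent field decodes as a nil slice. A standalone round trip of just that wire behavior via easyproto:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/easyproto"
)

var mp easyproto.MarshalerPool

// marshalIDs emits each shard ID as a separate occurrence of field 1,
// exactly like StartShardEvacuationRequest_Body.EmitProtobuf does.
func marshalIDs(ids [][]byte) []byte {
	m := mp.Get()
	defer mp.Put(m)
	mm := m.MessageMarshaler()
	for _, id := range ids {
		mm.AppendBytes(1, id)
	}
	return m.Marshal(nil)
}

// unmarshalIDs appends every field-1 occurrence it encounters,
// preserving the order in which the records appear on the wire.
func unmarshalIDs(src []byte) ([][]byte, error) {
	var ids [][]byte
	var fc easyproto.FieldContext
	for len(src) > 0 {
		var err error
		src, err = fc.NextField(src)
		if err != nil {
			return nil, err
		}
		if fc.FieldNum == 1 {
			data, ok := fc.Bytes()
			if !ok {
				return nil, fmt.Errorf("cannot unmarshal shard ID")
			}
			ids = append(ids, data)
		}
	}
	return ids, nil
}

func main() {
	in := [][]byte{[]byte("shard-a"), []byte("shard-b")}
	out, err := unmarshalIDs(marshalIDs(in))
	if err != nil {
		panic(err)
	}
	for _, id := range out {
		fmt.Println(string(id))
	}
}
```
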
-func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"ignoreErrors\":" - out.RawString(prefix) - out.Bool(x.IgnoreErrors) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"scope\":" - out.RawString(prefix) - out.Uint32(x.Scope) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerWorkerCount\":" - out.RawString(prefix) - out.Uint32(x.ContainerWorkerCount) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"objectWorkerCount\":" - out.RawString(prefix) - out.Uint32(x.ObjectWorkerCount) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"repOneOnly\":" - out.RawString(prefix) - out.Bool(x.RepOneOnly) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StartShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - var list [][]byte - in.Delim('[') - for !in.IsDelim(']') { - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - list = append(list, f) - in.WantComma() - } - x.Shard_ID = list - in.Delim(']') - } - case "ignoreErrors": - { - var f bool - f = in.Bool() - x.IgnoreErrors = f - } - case "scope": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.Scope = f - } - case "containerWorkerCount": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.ContainerWorkerCount = f - } - case "objectWorkerCount": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.ObjectWorkerCount = f - } - case "repOneOnly": - { - var f bool - f = in.Bool() - x.RepOneOnly = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardEvacuationRequest struct { - Body *StartShardEvacuationRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest)(nil) - _ 
encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest)(nil) - _ json.Marshaler = (*StartShardEvacuationRequest)(nil) - _ json.Unmarshaler = (*StartShardEvacuationRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardEvacuationRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *StartShardEvacuationRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *StartShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StartShardEvacuationRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StartShardEvacuationRequest) SetBody(v *StartShardEvacuationRequest_Body) { - x.Body = v -} -func (x *StartShardEvacuationRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StartShardEvacuationRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *StartShardEvacuationRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StartShardEvacuationRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StartShardEvacuationRequest_Body - f = new(StartShardEvacuationRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardEvacuationResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse_Body)(nil) - _ json.Marshaler = (*StartShardEvacuationResponse_Body)(nil) - _ json.Unmarshaler = (*StartShardEvacuationResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardEvacuationResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
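
StartShardEvacuationResponse_Body is one of several empty bodies in this file: StableSize reports 0 and EmitProtobuf writes nothing, so the body marshals to zero bytes on the wire and to a bare {} in JSON; consequently ReadSignedData on the enclosing response signs an empty payload. A quick check of those properties, again under the assumed import path:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	body := new(control.StartShardEvacuationResponse_Body)

	fmt.Println(body.StableSize())              // 0
	fmt.Println(len(body.MarshalProtobuf(nil))) // 0: no fields, no bytes

	data, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {}
}
```
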
-func (x *StartShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardEvacuationResponse struct { - Body *StartShardEvacuationResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse)(nil) - _ json.Marshaler = (*StartShardEvacuationResponse)(nil) - _ json.Unmarshaler = (*StartShardEvacuationResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardEvacuationResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *StartShardEvacuationResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *StartShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *StartShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StartShardEvacuationResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StartShardEvacuationResponse) SetBody(v *StartShardEvacuationResponse_Body) { - x.Body = v -} -func (x *StartShardEvacuationResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StartShardEvacuationResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardEvacuationResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StartShardEvacuationResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StartShardEvacuationResponse_Body - f = new(StartShardEvacuationResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetShardEvacuationStatusRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusRequest_Body)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *GetShardEvacuationStatusRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetShardEvacuationStatusRequest struct { - Body *GetShardEvacuationStatusRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusRequest)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetShardEvacuationStatusRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetShardEvacuationStatusRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. 
-func (x *GetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetShardEvacuationStatusRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetShardEvacuationStatusRequest) SetBody(v *GetShardEvacuationStatusRequest_Body) { - x.Body = v -} -func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetShardEvacuationStatusRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetShardEvacuationStatusRequest_Body - f = new(GetShardEvacuationStatusRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetShardEvacuationStatusResponse_Body_Status int32 - -const ( - GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0 - GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1 - GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2 -) - -var ( - GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{ - 0: "EVACUATE_SHARD_STATUS_UNDEFINED", - 1: "RUNNING", - 2: "COMPLETED", - } - GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{ - "EVACUATE_SHARD_STATUS_UNDEFINED": 0, - "RUNNING": 1, - "COMPLETED": 2, - } -) - -func (x GetShardEvacuationStatusResponse_Body_Status) String() string { - if v, ok := GetShardEvacuationStatusResponse_Body_Status_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *GetShardEvacuationStatusResponse_Body_Status) FromString(s string) bool { - if v, ok := GetShardEvacuationStatusResponse_Body_Status_value[s]; ok { - *x = GetShardEvacuationStatusResponse_Body_Status(v) - return true - } - return false -} - -type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct { - Value int64 `json:"value"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.Int64Size(1, x.Value) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Value != 0 { - mm.AppendInt64(1, x.Value) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_UnixTimestamp") - } - switch fc.FieldNum { - case 1: // Value - data, ok := fc.Int64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Value") - } - x.Value = data - } - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 { - if x != nil { - return x.Value - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) SetValue(v int64) { - x.Value = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"value\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "value": - { - var f int64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseInt(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := int64(v) - f = pv - x.Value = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetShardEvacuationStatusResponse_Body_Duration struct { - Seconds int64 `json:"seconds"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetShardEvacuationStatusResponse_Body_Duration) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.Int64Size(1, x.Seconds) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
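
UnixTimestamp.Value (and Duration.Seconds below) go to JSON as quoted decimal strings, the proto3 JSON convention that keeps 64-bit values exact for JavaScript clients, while JsonNumber on the way in accepts the quoted and the bare form alike. A self-contained illustration of that asymmetry using easyjson's writer and lexer directly; unlike the generated code, the sketch skips unknown keys explicitly:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/mailru/easyjson/jlexer"
	"github.com/mailru/easyjson/jwriter"
)

// writeValue mimics the generated MarshalEasyJSON: the int64 is wrapped
// in quotes so clients with 53-bit number precision do not lose digits.
func writeValue(v int64) []byte {
	w := jwriter.Writer{}
	w.RawString(`{"value":`)
	w.RawByte('"')
	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, v, 10)
	w.RawByte('"')
	w.RawByte('}')
	return w.Buffer.BuildBytes()
}

// readValue mimics the generated UnmarshalEasyJSON loop; JsonNumber
// tolerates both "123" (quoted) and 123 (bare).
func readValue(data []byte) (int64, error) {
	in := jlexer.Lexer{Data: data}
	var v int64
	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeFieldName(false)
		in.WantColon()
		if key == "value" {
			n := in.JsonNumber()
			parsed, err := strconv.ParseInt(n.String(), 10, 64)
			if err != nil {
				return 0, err
			}
			v = parsed
		} else {
			in.SkipRecursive() // the generated code has no such branch
		}
		in.WantComma()
	}
	in.Delim('}')
	return v, in.Error()
}

func main() {
	data := writeValue(1700000000)
	fmt.Println(string(data)) // {"value":"1700000000"}

	v, err := readValue(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 1700000000

	// The bare-number form is accepted too.
	v, _ = readValue([]byte(`{"value":1700000000}`))
	fmt.Println(v) // 1700000000
}
```
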
-func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusResponse_Body_Duration) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Seconds != 0 { - mm.AppendInt64(1, x.Seconds) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_Duration") - } - switch fc.FieldNum { - case 1: // Seconds - data, ok := fc.Int64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Seconds") - } - x.Seconds = data - } - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body_Duration) SetSeconds(v int64) { - x.Seconds = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"seconds\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "seconds": - { - var f int64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseInt(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := int64(v) - f = pv - x.Seconds = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetShardEvacuationStatusResponse_Body struct { - TotalObjects uint64 `json:"totalObjects"` - EvacuatedObjects uint64 `json:"evacuatedObjects"` - FailedObjects uint64 `json:"failedObjects"` - Shard_ID [][]byte `json:"shardID"` - Status GetShardEvacuationStatusResponse_Body_Status `json:"status"` - Duration *GetShardEvacuationStatusResponse_Body_Duration `json:"duration"` - StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `json:"startedAt"` - ErrorMessage string `json:"errorMessage"` - SkippedObjects uint64 `json:"skippedObjects"` - TotalTrees uint64 `json:"totalTrees"` - EvacuatedTrees uint64 `json:"evacuatedTrees"` - FailedTrees uint64 `json:"failedTrees"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetShardEvacuationStatusResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt64Size(1, x.TotalObjects) - size += proto.UInt64Size(2, x.EvacuatedObjects) - size += proto.UInt64Size(3, x.FailedObjects) - size += proto.RepeatedBytesSize(4, x.Shard_ID) - size += proto.EnumSize(5, int32(x.Status)) - size += proto.NestedStructureSize(6, x.Duration) - size += proto.NestedStructureSize(7, x.StartedAt) - size += proto.StringSize(8, x.ErrorMessage) - size += proto.UInt64Size(9, x.SkippedObjects) - size += proto.UInt64Size(10, x.TotalTrees) - size += proto.UInt64Size(11, x.EvacuatedTrees) - size += proto.UInt64Size(12, x.FailedTrees) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *GetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.TotalObjects != 0 { - mm.AppendUint64(1, x.TotalObjects) - } - if x.EvacuatedObjects != 0 { - mm.AppendUint64(2, x.EvacuatedObjects) - } - if x.FailedObjects != 0 { - mm.AppendUint64(3, x.FailedObjects) - } - for j := range x.Shard_ID { - mm.AppendBytes(4, x.Shard_ID[j]) - } - if int32(x.Status) != 0 { - mm.AppendInt32(5, int32(x.Status)) - } - if x.Duration != nil { - x.Duration.EmitProtobuf(mm.AppendMessage(6)) - } - if x.StartedAt != nil { - x.StartedAt.EmitProtobuf(mm.AppendMessage(7)) - } - if len(x.ErrorMessage) != 0 { - mm.AppendString(8, x.ErrorMessage) - } - if x.SkippedObjects != 0 { - mm.AppendUint64(9, x.SkippedObjects) - } - if x.TotalTrees != 0 { - mm.AppendUint64(10, x.TotalTrees) - } - if x.EvacuatedTrees != 0 { - mm.AppendUint64(11, x.EvacuatedTrees) - } - if x.FailedTrees != 0 { - mm.AppendUint64(12, x.FailedTrees) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body") - } - switch fc.FieldNum { - case 1: // TotalObjects - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TotalObjects") - } - x.TotalObjects = data - case 2: // EvacuatedObjects - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "EvacuatedObjects") - } - x.EvacuatedObjects = data - case 3: // FailedObjects - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "FailedObjects") - } - x.FailedObjects = data - case 4: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 5: // Status - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Status") - } - x.Status = GetShardEvacuationStatusResponse_Body_Status(data) - case 6: // Duration - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Duration") - } - x.Duration = new(GetShardEvacuationStatusResponse_Body_Duration) - if err := x.Duration.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 7: // StartedAt - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "StartedAt") - } - x.StartedAt = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp) - if err := x.StartedAt.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 8: // ErrorMessage - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ErrorMessage") - } - x.ErrorMessage = data - case 9: // SkippedObjects - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "SkippedObjects") - } - x.SkippedObjects = data - case 10: // TotalTrees - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TotalTrees") - } - x.TotalTrees = data - case 11: // EvacuatedTrees - 
data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "EvacuatedTrees") - } - x.EvacuatedTrees = data - case 12: // FailedTrees - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "FailedTrees") - } - x.FailedTrees = data - } - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 { - if x != nil { - return x.TotalObjects - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetTotalObjects(v uint64) { - x.TotalObjects = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 { - if x != nil { - return x.EvacuatedObjects - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedObjects(v uint64) { - x.EvacuatedObjects = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 { - if x != nil { - return x.FailedObjects - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetFailedObjects(v uint64) { - x.FailedObjects = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status { - if x != nil { - return x.Status - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetStatus(v GetShardEvacuationStatusResponse_Body_Status) { - x.Status = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration { - if x != nil { - return x.Duration - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body) SetDuration(v *GetShardEvacuationStatusResponse_Body_Duration) { - x.Duration = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp { - if x != nil { - return x.StartedAt - } - return nil -} -func (x *GetShardEvacuationStatusResponse_Body) SetStartedAt(v *GetShardEvacuationStatusResponse_Body_UnixTimestamp) { - x.StartedAt = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} -func (x *GetShardEvacuationStatusResponse_Body) SetErrorMessage(v string) { - x.ErrorMessage = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 { - if x != nil { - return x.SkippedObjects - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetSkippedObjects(v uint64) { - x.SkippedObjects = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 { - if x != nil { - return x.TotalTrees - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetTotalTrees(v uint64) { - x.TotalTrees = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 { - if x != nil { - return x.EvacuatedTrees - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedTrees(v uint64) { - x.EvacuatedTrees = v -} -func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 { - if x != nil { - return x.FailedTrees - } - return 0 -} -func (x *GetShardEvacuationStatusResponse_Body) SetFailedTrees(v uint64) { - x.FailedTrees = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *GetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"totalObjects\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"evacuatedObjects\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"failedObjects\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"status\":" - out.RawString(prefix) - v := int32(x.Status) - if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"duration\":" - out.RawString(prefix) - x.Duration.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"startedAt\":" - out.RawString(prefix) - x.StartedAt.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"errorMessage\":" - out.RawString(prefix) - out.String(x.ErrorMessage) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"skippedObjects\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"totalTrees\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"evacuatedTrees\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"failedTrees\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "totalObjects": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.TotalObjects = f - } - case "evacuatedObjects": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.EvacuatedObjects = f - } - case "failedObjects": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.FailedObjects = f - } - case "shardID": - { - var f []byte - var list [][]byte - in.Delim('[') - for !in.IsDelim(']') { - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - list = append(list, f) - in.WantComma() - } - x.Shard_ID = list - in.Delim(']') - } - case "status": - { - var f GetShardEvacuationStatusResponse_Body_Status - var parsedValue GetShardEvacuationStatusResponse_Body_Status - switch v := in.Interface().(type) { - case string: - if vv, ok := GetShardEvacuationStatusResponse_Body_Status_value[v]; ok { - parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv) - case float64: - parsedValue = GetShardEvacuationStatusResponse_Body_Status(v) - } - f = parsedValue - x.Status = f - } - case "duration": - { - var f *GetShardEvacuationStatusResponse_Body_Duration - f = new(GetShardEvacuationStatusResponse_Body_Duration) - f.UnmarshalEasyJSON(in) - x.Duration = f - } - case "startedAt": - { - var f *GetShardEvacuationStatusResponse_Body_UnixTimestamp - f = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp) - f.UnmarshalEasyJSON(in) - x.StartedAt = f - } - case "errorMessage": - { - var f string - f = in.String() - x.ErrorMessage = f - } - case "skippedObjects": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.SkippedObjects = f - } - case "totalTrees": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.TotalTrees = f - } - case "evacuatedTrees": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.EvacuatedTrees = f - } - case "failedTrees": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.FailedTrees = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - 
-type GetShardEvacuationStatusResponse struct { - Body *GetShardEvacuationStatusResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse)(nil) - _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse)(nil) - _ json.Marshaler = (*GetShardEvacuationStatusResponse)(nil) - _ json.Unmarshaler = (*GetShardEvacuationStatusResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetShardEvacuationStatusResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetShardEvacuationStatusResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetShardEvacuationStatusResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetShardEvacuationStatusResponse) SetBody(v *GetShardEvacuationStatusResponse_Body) { - x.Body = v -} -func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetShardEvacuationStatusResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetShardEvacuationStatusResponse_Body - f = new(GetShardEvacuationStatusResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ResetShardEvacuationStatusRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) - _ json.Marshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) - _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *ResetShardEvacuationStatusRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ResetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ResetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ResetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ResetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ResetShardEvacuationStatusRequest struct { - Body *ResetShardEvacuationStatusRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest)(nil) - _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest)(nil) - _ json.Marshaler = (*ResetShardEvacuationStatusRequest)(nil) - _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ResetShardEvacuationStatusRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ResetShardEvacuationStatusRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. 
-func (x *ResetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ResetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ResetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ResetShardEvacuationStatusRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ResetShardEvacuationStatusRequest) SetBody(v *ResetShardEvacuationStatusRequest_Body) { - x.Body = v -} -func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ResetShardEvacuationStatusRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ResetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ResetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ResetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ResetShardEvacuationStatusRequest_Body - f = new(ResetShardEvacuationStatusRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ResetShardEvacuationStatusResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) - _ json.Marshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) - _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ResetShardEvacuationStatusResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ResetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ResetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ResetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ResetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ResetShardEvacuationStatusResponse struct { - Body *ResetShardEvacuationStatusResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse)(nil) - _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse)(nil) - _ json.Marshaler = (*ResetShardEvacuationStatusResponse)(nil) - _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ResetShardEvacuationStatusResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ResetShardEvacuationStatusResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ResetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ResetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *ResetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ResetShardEvacuationStatusResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ResetShardEvacuationStatusResponse) SetBody(v *ResetShardEvacuationStatusResponse_Body) { - x.Body = v -} -func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ResetShardEvacuationStatusResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ResetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ResetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ResetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ResetShardEvacuationStatusResponse_Body - f = new(ResetShardEvacuationStatusResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StopShardEvacuationRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest_Body)(nil) - _ json.Marshaler = (*StopShardEvacuationRequest_Body)(nil) - _ json.Unmarshaler = (*StopShardEvacuationRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *StopShardEvacuationRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StopShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StopShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StopShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StopShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StopShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StopShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StopShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StopShardEvacuationRequest struct { - Body *StopShardEvacuationRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest)(nil) - _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest)(nil) - _ json.Marshaler = (*StopShardEvacuationRequest)(nil) - _ json.Unmarshaler = (*StopShardEvacuationRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StopShardEvacuationRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *StopShardEvacuationRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *StopShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *StopShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StopShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StopShardEvacuationRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StopShardEvacuationRequest) SetBody(v *StopShardEvacuationRequest_Body) { - x.Body = v -} -func (x *StopShardEvacuationRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StopShardEvacuationRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StopShardEvacuationRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *StopShardEvacuationRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StopShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StopShardEvacuationRequest_Body - f = new(StopShardEvacuationRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StopShardEvacuationResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse_Body)(nil) - _ json.Marshaler = (*StopShardEvacuationResponse_Body)(nil) - _ json.Unmarshaler = (*StopShardEvacuationResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StopShardEvacuationResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StopShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StopShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StopShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StopShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StopShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *StopShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StopShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StopShardEvacuationResponse struct { - Body *StopShardEvacuationResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse)(nil) - _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse)(nil) - _ json.Marshaler = (*StopShardEvacuationResponse)(nil) - _ json.Unmarshaler = (*StopShardEvacuationResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StopShardEvacuationResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *StopShardEvacuationResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *StopShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StopShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *StopShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StopShardEvacuationResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StopShardEvacuationResponse) SetBody(v *StopShardEvacuationResponse_Body) { - x.Body = v -} -func (x *StopShardEvacuationResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StopShardEvacuationResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StopShardEvacuationResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StopShardEvacuationResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StopShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StopShardEvacuationResponse_Body - f = new(StopShardEvacuationResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddChainLocalOverrideRequest_Body struct { - Target *ChainTarget `json:"target"` - Chain []byte `json:"chain"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest_Body)(nil) - _ json.Marshaler = (*AddChainLocalOverrideRequest_Body)(nil) - _ json.Unmarshaler = (*AddChainLocalOverrideRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *AddChainLocalOverrideRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Target) - size += proto.BytesSize(2, x.Chain) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Target != nil { - x.Target.EmitProtobuf(mm.AppendMessage(1)) - } - if len(x.Chain) != 0 { - mm.AppendBytes(2, x.Chain) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest_Body") - } - switch fc.FieldNum { - case 1: // Target - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Target") - } - x.Target = new(ChainTarget) - if err := x.Target.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Chain - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Chain") - } - x.Chain = data - } - } - return nil -} -func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} -func (x *AddChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { - x.Target = v -} -func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte { - if x != nil { - return x.Chain - } - return nil -} -func (x *AddChainLocalOverrideRequest_Body) SetChain(v []byte) { - x.Chain = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"target\":" - out.RawString(prefix) - x.Target.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"chain\":" - out.RawString(prefix) - if x.Chain != nil { - out.Base64Bytes(x.Chain) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *AddChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "target": - { - var f *ChainTarget - f = new(ChainTarget) - f.UnmarshalEasyJSON(in) - x.Target = f - } - case "chain": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Chain = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddChainLocalOverrideRequest struct { - Body *AddChainLocalOverrideRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest)(nil) - _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest)(nil) - _ json.Marshaler = (*AddChainLocalOverrideRequest)(nil) - _ json.Unmarshaler = (*AddChainLocalOverrideRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *AddChainLocalOverrideRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddChainLocalOverrideRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *AddChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddChainLocalOverrideRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddChainLocalOverrideRequest) SetBody(v *AddChainLocalOverrideRequest_Body) { - x.Body = v -} -func (x *AddChainLocalOverrideRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddChainLocalOverrideRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddChainLocalOverrideRequest_Body - f = new(AddChainLocalOverrideRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddChainLocalOverrideResponse_Body struct { - ChainId []byte `json:"chainId"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse_Body)(nil) - _ json.Marshaler = (*AddChainLocalOverrideResponse_Body)(nil) - _ json.Unmarshaler = (*AddChainLocalOverrideResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *AddChainLocalOverrideResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ChainId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ChainId) != 0 { - mm.AppendBytes(1, x.ChainId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse_Body") - } - switch fc.FieldNum { - case 1: // ChainId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ChainId") - } - x.ChainId = data - } - } - return nil -} -func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} -func (x *AddChainLocalOverrideResponse_Body) SetChainId(v []byte) { - x.ChainId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"chainId\":" - out.RawString(prefix) - if x.ChainId != nil { - out.Base64Bytes(x.ChainId) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "chainId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ChainId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddChainLocalOverrideResponse struct { - Body *AddChainLocalOverrideResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse)(nil) - _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse)(nil) - _ json.Marshaler = (*AddChainLocalOverrideResponse)(nil) - _ json.Unmarshaler = (*AddChainLocalOverrideResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *AddChainLocalOverrideResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddChainLocalOverrideResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddChainLocalOverrideResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddChainLocalOverrideResponse) SetBody(v *AddChainLocalOverrideResponse_Body) { - x.Body = v -} -func (x *AddChainLocalOverrideResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddChainLocalOverrideResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *AddChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddChainLocalOverrideResponse_Body - f = new(AddChainLocalOverrideResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetChainLocalOverrideRequest_Body struct { - Target *ChainTarget `json:"target"` - ChainId []byte `json:"chainId"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest_Body)(nil) - _ json.Marshaler = (*GetChainLocalOverrideRequest_Body)(nil) - _ json.Unmarshaler = (*GetChainLocalOverrideRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetChainLocalOverrideRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Target) - size += proto.BytesSize(2, x.ChainId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Target != nil { - x.Target.EmitProtobuf(mm.AppendMessage(1)) - } - if len(x.ChainId) != 0 { - mm.AppendBytes(2, x.ChainId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest_Body") - } - switch fc.FieldNum { - case 1: // Target - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Target") - } - x.Target = new(ChainTarget) - if err := x.Target.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // ChainId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ChainId") - } - x.ChainId = data - } - } - return nil -} -func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} -func (x *GetChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { - x.Target = v -} -func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} -func (x *GetChainLocalOverrideRequest_Body) SetChainId(v []byte) { - x.ChainId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"target\":" - out.RawString(prefix) - x.Target.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"chainId\":" - out.RawString(prefix) - if x.ChainId != nil { - out.Base64Bytes(x.ChainId) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "target": - { - var f *ChainTarget - f = new(ChainTarget) - f.UnmarshalEasyJSON(in) - x.Target = f - } - case "chainId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ChainId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetChainLocalOverrideRequest struct { - Body *GetChainLocalOverrideRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest)(nil) - _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest)(nil) - _ json.Marshaler = (*GetChainLocalOverrideRequest)(nil) - _ json.Unmarshaler = (*GetChainLocalOverrideRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *GetChainLocalOverrideRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetChainLocalOverrideRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(GetChainLocalOverrideRequest_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *GetChainLocalOverrideRequest) SetBody(v *GetChainLocalOverrideRequest_Body) {
-	x.Body = v
-}
-func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *GetChainLocalOverrideRequest) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetChainLocalOverrideRequest_Body - f = new(GetChainLocalOverrideRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetChainLocalOverrideResponse_Body struct { - Chain []byte `json:"chain"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse_Body)(nil) - _ json.Marshaler = (*GetChainLocalOverrideResponse_Body)(nil) - _ json.Unmarshaler = (*GetChainLocalOverrideResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetChainLocalOverrideResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Chain) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Chain) != 0 { - mm.AppendBytes(1, x.Chain) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
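The JSON codec above is easyjson-based and carries []byte fields as base64 strings. A small round-trip sketch (same package assumption as before) showing that a one-byte ChainId survives marshal and unmarshal:

package control // assumption, as above

import "fmt"

// jsonBytesRoundTrip demonstrates the base64 encoding of bytes fields:
// ChainId {0x2a} marshals to "Kg==".
func jsonBytesRoundTrip() error {
	in := &GetChainLocalOverrideRequest_Body{ChainId: []byte{0x2a}}
	raw, err := in.MarshalJSON() // e.g. {"target":null,"chainId":"Kg=="}
	if err != nil {
		return err
	}
	var out GetChainLocalOverrideRequest_Body
	if err := out.UnmarshalJSON(raw); err != nil {
		return err
	}
	fmt.Printf("%s -> % x\n", raw, out.GetChainId())
	return nil
}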
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse_Body")
-		}
-		switch fc.FieldNum {
-		case 1: // Chain
-			data, ok := fc.Bytes()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Chain")
-			}
-			x.Chain = data
-		}
-	}
-	return nil
-}
-func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte {
-	if x != nil {
-		return x.Chain
-	}
-	return nil
-}
-func (x *GetChainLocalOverrideResponse_Body) SetChain(v []byte) {
-	x.Chain = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"chain\":"
-		out.RawString(prefix)
-		if x.Chain != nil {
-			out.Base64Bytes(x.Chain)
-		} else {
-			out.String("")
-		}
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "chain":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.Chain = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type GetChainLocalOverrideResponse struct {
-	Body      *GetChainLocalOverrideResponse_Body `json:"body"`
-	Signature *Signature                          `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*GetChainLocalOverrideResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse)(nil)
-	_ json.Marshaler            = (*GetChainLocalOverrideResponse)(nil)
-	_ json.Unmarshaler          = (*GetChainLocalOverrideResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetChainLocalOverrideResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetChainLocalOverrideResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetChainLocalOverrideResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetChainLocalOverrideResponse) SetBody(v *GetChainLocalOverrideResponse_Body) { - x.Body = v -} -func (x *GetChainLocalOverrideResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetChainLocalOverrideResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetChainLocalOverrideResponse_Body - f = new(GetChainLocalOverrideResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListChainLocalOverridesRequest_Body struct { - Target *ChainTarget `json:"target"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest_Body)(nil) - _ json.Marshaler = (*ListChainLocalOverridesRequest_Body)(nil) - _ json.Unmarshaler = (*ListChainLocalOverridesRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListChainLocalOverridesRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Target) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListChainLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Target != nil { - x.Target.EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListChainLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest_Body") - } - switch fc.FieldNum { - case 1: // Target - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Target") - } - x.Target = new(ChainTarget) - if err := x.Target.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} -func (x *ListChainLocalOverridesRequest_Body) SetTarget(v *ChainTarget) { - x.Target = v -} - -// MarshalJSON implements the json.Marshaler interface. 
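MarshalProtobuf appends to the dst slice it is given (nil is fine) and borrows a marshaler from the shared pool, so encoding allocates little beyond the output buffer. A round-trip sketch under the same package assumption:

package control // assumption, as above

// protoRoundTrip encodes a request body and decodes it back.
func protoRoundTrip() error {
	in := &ListChainLocalOverridesRequest_Body{Target: new(ChainTarget)}
	raw := in.MarshalProtobuf(nil) // nil dst: a fresh buffer is allocated
	var out ListChainLocalOverridesRequest_Body
	return out.UnmarshalProtobuf(raw)
}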
-func (x *ListChainLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"target\":"
-		out.RawString(prefix)
-		x.Target.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListChainLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *ListChainLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "target":
-			{
-				var f *ChainTarget
-				f = new(ChainTarget)
-				f.UnmarshalEasyJSON(in)
-				x.Target = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type ListChainLocalOverridesRequest struct {
-	Body      *ListChainLocalOverridesRequest_Body `json:"body"`
-	Signature *Signature                           `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*ListChainLocalOverridesRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest)(nil)
-	_ json.Marshaler            = (*ListChainLocalOverridesRequest)(nil)
-	_ json.Unmarshaler          = (*ListChainLocalOverridesRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListChainLocalOverridesRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListChainLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListChainLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListChainLocalOverridesRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListChainLocalOverridesRequest) SetBody(v *ListChainLocalOverridesRequest_Body) { - x.Body = v -} -func (x *ListChainLocalOverridesRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListChainLocalOverridesRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListChainLocalOverridesRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListChainLocalOverridesRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListChainLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListChainLocalOverridesRequest_Body - f = new(ListChainLocalOverridesRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListChainLocalOverridesResponse_Body struct { - Chains [][]byte `json:"chains"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse_Body)(nil) - _ json.Marshaler = (*ListChainLocalOverridesResponse_Body)(nil) - _ json.Unmarshaler = (*ListChainLocalOverridesResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *ListChainLocalOverridesResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Chains) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListChainLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListChainLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Chains { - mm.AppendBytes(1, x.Chains[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListChainLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse_Body") - } - switch fc.FieldNum { - case 1: // Chains - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Chains") - } - x.Chains = append(x.Chains, data) - } - } - return nil -} -func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte { - if x != nil { - return x.Chains - } - return nil -} -func (x *ListChainLocalOverridesResponse_Body) SetChains(v [][]byte) { - x.Chains = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListChainLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"chains\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Chains { - if i != 0 { - out.RawByte(',') - } - if x.Chains[i] != nil { - out.Base64Bytes(x.Chains[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ListChainLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "chains":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Chains = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type ListChainLocalOverridesResponse struct {
-	Body      *ListChainLocalOverridesResponse_Body `json:"body"`
-	Signature *Signature                            `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*ListChainLocalOverridesResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse)(nil)
-	_ json.Marshaler            = (*ListChainLocalOverridesResponse)(nil)
-	_ json.Unmarshaler          = (*ListChainLocalOverridesResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListChainLocalOverridesResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListChainLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
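In the wire format above, a repeated bytes field is emitted as one occurrence of field 1 per element, and decoding appends one element per occurrence, so order and multiplicity survive a round trip. A sketch under the same package assumption:

package control // assumption, as above

// chainsRoundTrip shows that the Chains slice round-trips element by element.
func chainsRoundTrip() ([][]byte, error) {
	in := &ListChainLocalOverridesResponse_Body{Chains: [][]byte{{0x01}, {0x02}}}
	var out ListChainLocalOverridesResponse_Body
	if err := out.UnmarshalProtobuf(in.MarshalProtobuf(nil)); err != nil {
		return nil, err
	}
	return out.GetChains(), nil // [][]byte{{0x01}, {0x02}}, in order
}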
-func (x *ListChainLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListChainLocalOverridesResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListChainLocalOverridesResponse) SetBody(v *ListChainLocalOverridesResponse_Body) { - x.Body = v -} -func (x *ListChainLocalOverridesResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListChainLocalOverridesResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListChainLocalOverridesResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListChainLocalOverridesResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListChainLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListChainLocalOverridesResponse_Body - f = new(ListChainLocalOverridesResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListTargetsLocalOverridesRequest_Body struct { - ChainName string `json:"chainName"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) - _ json.Marshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) - _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. 
-// -// Structures with the same field values have the same binary size. -func (x *ListTargetsLocalOverridesRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.ChainName) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListTargetsLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListTargetsLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ChainName) != 0 { - mm.AppendString(1, x.ChainName) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest_Body") - } - switch fc.FieldNum { - case 1: // ChainName - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ChainName") - } - x.ChainName = data - } - } - return nil -} -func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string { - if x != nil { - return x.ChainName - } - return "" -} -func (x *ListTargetsLocalOverridesRequest_Body) SetChainName(v string) { - x.ChainName = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListTargetsLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"chainName\":" - out.RawString(prefix) - out.String(x.ChainName) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "chainName": - { - var f string - f = in.String() - x.ChainName = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListTargetsLocalOverridesRequest struct { - Body *ListTargetsLocalOverridesRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest)(nil) - _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest)(nil) - _ json.Marshaler = (*ListTargetsLocalOverridesRequest)(nil) - _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *ListTargetsLocalOverridesRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListTargetsLocalOverridesRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListTargetsLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(ListTargetsLocalOverridesRequest_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesRequest) SetBody(v *ListTargetsLocalOverridesRequest_Body) {
-	x.Body = v
-}
-func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesRequest) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListTargetsLocalOverridesRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListTargetsLocalOverridesRequest_Body - f = new(ListTargetsLocalOverridesRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListTargetsLocalOverridesResponse_Body struct { - Targets []ChainTarget `json:"targets"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) - _ json.Marshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) - _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.Targets { - size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListTargetsLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for i := range x.Targets { - x.Targets[i].EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
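Targets is a value slice ([]ChainTarget, not []*ChainTarget), so the decoder appends a zero value and unmarshals into it in place, saving one pointer indirection per element. A decoding sketch under the same package assumption:

package control // assumption, as above

// decodeTargets extracts the targets from an encoded response body.
func decodeTargets(raw []byte) ([]ChainTarget, error) {
	var body ListTargetsLocalOverridesResponse_Body
	if err := body.UnmarshalProtobuf(raw); err != nil {
		return nil, err
	}
	return body.GetTargets(), nil
}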
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse_Body")
-		}
-		switch fc.FieldNum {
-		case 1: // Targets
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Targets")
-			}
-			x.Targets = append(x.Targets, ChainTarget{})
-			ff := &x.Targets[len(x.Targets)-1]
-			if err := ff.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget {
-	if x != nil {
-		return x.Targets
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) {
-	x.Targets = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"targets\":"
-		out.RawString(prefix)
-		out.RawByte('[')
-		for i := range x.Targets {
-			if i != 0 {
-				out.RawByte(',')
-			}
-			x.Targets[i].MarshalEasyJSON(out)
-		}
-		out.RawByte(']')
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "targets":
-			{
-				var f ChainTarget
-				var list []ChainTarget
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					f = ChainTarget{}
-					f.UnmarshalEasyJSON(in)
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Targets = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type ListTargetsLocalOverridesResponse struct {
-	Body      *ListTargetsLocalOverridesResponse_Body `json:"body"`
-	Signature *Signature                              `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*ListTargetsLocalOverridesResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
-	_ json.Marshaler            = (*ListTargetsLocalOverridesResponse)(nil)
-	_ json.Unmarshaler          = (*ListTargetsLocalOverridesResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListTargetsLocalOverridesResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListTargetsLocalOverridesResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListTargetsLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(ListTargetsLocalOverridesResponse_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesResponse) SetBody(v *ListTargetsLocalOverridesResponse_Body) {
-	x.Body = v
-}
-func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *ListTargetsLocalOverridesResponse) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesResponse) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListTargetsLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListTargetsLocalOverridesResponse_Body - f = new(ListTargetsLocalOverridesResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveChainLocalOverrideRequest_Body struct { - Target *ChainTarget `json:"target"` - ChainId []byte `json:"chainId"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) - _ json.Marshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) - _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveChainLocalOverrideRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Target) - size += proto.BytesSize(2, x.ChainId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Target != nil { - x.Target.EmitProtobuf(mm.AppendMessage(1)) - } - if len(x.ChainId) != 0 { - mm.AppendBytes(2, x.ChainId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest_Body") - } - switch fc.FieldNum { - case 1: // Target - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Target") - } - x.Target = new(ChainTarget) - if err := x.Target.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // ChainId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ChainId") - } - x.ChainId = data - } - } - return nil -} -func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} -func (x *RemoveChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) { - x.Target = v -} -func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} -func (x *RemoveChainLocalOverrideRequest_Body) SetChainId(v []byte) { - x.ChainId = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *RemoveChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"target\":"
-		out.RawString(prefix)
-		x.Target.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"chainId\":"
-		out.RawString(prefix)
-		if x.ChainId != nil {
-			out.Base64Bytes(x.ChainId)
-		} else {
-			out.String("")
-		}
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "target":
-			{
-				var f *ChainTarget
-				f = new(ChainTarget)
-				f.UnmarshalEasyJSON(in)
-				x.Target = f
-			}
-		case "chainId":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.ChainId = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveChainLocalOverrideRequest struct {
-	Body      *RemoveChainLocalOverrideRequest_Body `json:"body"`
-	Signature *Signature                            `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveChainLocalOverrideRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
-	_ json.Marshaler            = (*RemoveChainLocalOverrideRequest)(nil)
-	_ json.Unmarshaler          = (*RemoveChainLocalOverrideRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverrideRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveChainLocalOverrideRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveChainLocalOverrideRequest) SetBody(v *RemoveChainLocalOverrideRequest_Body) { - x.Body = v -} -func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveChainLocalOverrideRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverrideRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
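// How the SignedDataSize/ReadSignedData pair above is meant to be used: the
// signature covers the marshaled Body only, never the Signature field itself.
// A sketch, with the actual signing step elided because it lives outside this
// file:
//
//	req := new(RemoveChainLocalOverrideRequest)
//	req.SetBody(body) // body as in the sketch further above
//
//	signed, err := req.ReadSignedData(make([]byte, 0, req.SignedDataSize()))
//	if err == nil {
//		sig := new(Signature)
//		// fill sig from a signature computed over signed, then:
//		req.SetSignature(sig)
//	}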
-func (x *RemoveChainLocalOverrideRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveChainLocalOverrideRequest_Body - f = new(RemoveChainLocalOverrideRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveChainLocalOverrideResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) - _ json.Marshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) - _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveChainLocalOverrideResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveChainLocalOverrideResponse struct {
-	Body      *RemoveChainLocalOverrideResponse_Body `json:"body"`
-	Signature *Signature                             `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveChainLocalOverrideResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
-	_ json.Marshaler            = (*RemoveChainLocalOverrideResponse)(nil)
-	_ json.Unmarshaler          = (*RemoveChainLocalOverrideResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverrideResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
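// RemoveChainLocalOverrideResponse_Body above carries no fields, so the
// generated code degenerates on purpose: StableSize reports 0, EmitProtobuf
// writes nothing, and the JSON form is the empty object. A sketch (not part
// of the deleted file):
//
//	var b RemoveChainLocalOverrideResponse_Body
//	raw := b.MarshalProtobuf(nil) // zero-length: no fields to emit
//	js, _ := b.MarshalJSON()      // []byte("{}")
//	_, _ = raw, js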
-func (x *RemoveChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveChainLocalOverrideResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveChainLocalOverrideResponse) SetBody(v *RemoveChainLocalOverrideResponse_Body) { - x.Body = v -} -func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveChainLocalOverrideResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverrideResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *RemoveChainLocalOverrideResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveChainLocalOverrideResponse_Body - f = new(RemoveChainLocalOverrideResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveChainLocalOverridesByTargetRequest_Body struct { - Target *ChainTarget `json:"target"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) - _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) - _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. 
-// -// Structures with the same field values have the same binary size. -func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Target) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Target != nil { - x.Target.EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest_Body") - } - switch fc.FieldNum { - case 1: // Target - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Target") - } - x.Target = new(ChainTarget) - if err := x.Target.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget { - if x != nil { - return x.Target - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetRequest_Body) SetTarget(v *ChainTarget) { - x.Target = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"target\":" - out.RawString(prefix) - x.Target.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "target":
-			{
-				var f *ChainTarget
-				f = new(ChainTarget)
-				f.UnmarshalEasyJSON(in)
-				x.Target = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveChainLocalOverridesByTargetRequest struct {
-	Body      *RemoveChainLocalOverridesByTargetRequest_Body `json:"body"`
-	Signature *Signature                                     `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveChainLocalOverridesByTargetRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
-	_ json.Marshaler            = (*RemoveChainLocalOverridesByTargetRequest)(nil)
-	_ json.Unmarshaler          = (*RemoveChainLocalOverridesByTargetRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverridesByTargetRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverridesByTargetRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveChainLocalOverridesByTargetRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetRequest) SetBody(v *RemoveChainLocalOverridesByTargetRequest_Body) { - x.Body = v -} -func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverridesByTargetRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
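// The getters generated above tolerate nil receivers, so chained access is
// safe even on an absent request. A sketch (not part of the deleted file):
//
//	var req *RemoveChainLocalOverridesByTargetRequest // deliberately nil
//	target := req.GetBody().GetTarget()               // nil, no panic
//	size := req.SignedDataSize()                      // 0: a nil body has StableSize 0
//	_, _ = target, size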
-func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveChainLocalOverridesByTargetRequest_Body - f = new(RemoveChainLocalOverridesByTargetRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveChainLocalOverridesByTargetResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) - _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) - _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveChainLocalOverridesByTargetResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveChainLocalOverridesByTargetResponse struct {
-	Body      *RemoveChainLocalOverridesByTargetResponse_Body `json:"body"`
-	Signature *Signature                                      `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveChainLocalOverridesByTargetResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
-	_ json.Marshaler            = (*RemoveChainLocalOverridesByTargetResponse)(nil)
-	_ json.Unmarshaler          = (*RemoveChainLocalOverridesByTargetResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverridesByTargetResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverridesByTargetResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(RemoveChainLocalOverridesByTargetResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetResponse) SetBody(v *RemoveChainLocalOverridesByTargetResponse_Body) { - x.Body = v -} -func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveChainLocalOverridesByTargetResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveChainLocalOverridesByTargetResponse_Body - f = new(RemoveChainLocalOverridesByTargetResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SealWriteCacheRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - IgnoreErrors bool `json:"ignoreErrors"` - Async bool `json:"async"` - RestoreMode bool `json:"restoreMode"` - Shrink bool `json:"shrink"` -} - -var ( - _ encoding.ProtoMarshaler = (*SealWriteCacheRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest_Body)(nil) - _ json.Marshaler = (*SealWriteCacheRequest_Body)(nil) - _ json.Unmarshaler = (*SealWriteCacheRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SealWriteCacheRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.IgnoreErrors) - size += proto.BoolSize(3, x.Async) - size += proto.BoolSize(4, x.RestoreMode) - size += proto.BoolSize(5, x.Shrink) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SealWriteCacheRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SealWriteCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if x.IgnoreErrors { - mm.AppendBool(2, x.IgnoreErrors) - } - if x.Async { - mm.AppendBool(3, x.Async) - } - if x.RestoreMode { - mm.AppendBool(4, x.RestoreMode) - } - if x.Shrink { - mm.AppendBool(5, x.Shrink) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *SealWriteCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // IgnoreErrors - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors") - } - x.IgnoreErrors = data - case 3: // Async - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Async") - } - x.Async = data - case 4: // RestoreMode - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "RestoreMode") - } - x.RestoreMode = data - case 5: // Shrink - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shrink") - } - x.Shrink = data - } - } - return nil -} -func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *SealWriteCacheRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool { - if x != nil { - return x.IgnoreErrors - } - return false -} -func (x *SealWriteCacheRequest_Body) SetIgnoreErrors(v bool) { - x.IgnoreErrors = v -} -func (x *SealWriteCacheRequest_Body) GetAsync() bool { - if x != nil { - return x.Async - } - return false -} -func (x *SealWriteCacheRequest_Body) SetAsync(v bool) { - x.Async = v -} -func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool { - if x != nil { - return x.RestoreMode - } - return false -} -func (x *SealWriteCacheRequest_Body) SetRestoreMode(v bool) { - x.RestoreMode = v -} -func (x *SealWriteCacheRequest_Body) GetShrink() bool { - if x != nil { - return x.Shrink - } - return false -} -func (x *SealWriteCacheRequest_Body) SetShrink(v bool) { - x.Shrink = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SealWriteCacheRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"ignoreErrors\":" - out.RawString(prefix) - out.Bool(x.IgnoreErrors) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"async\":" - out.RawString(prefix) - out.Bool(x.Async) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"restoreMode\":" - out.RawString(prefix) - out.Bool(x.RestoreMode) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shrink\":" - out.RawString(prefix) - out.Bool(x.Shrink) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
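// A sketch of building the seal request body defined above (not part of the
// deleted file; shardA and shardB stand for raw shard IDs obtained
// elsewhere). EmitProtobuf only emits flags that are true, matching proto3's
// elision of default values:
//
//	body := new(SealWriteCacheRequest_Body)
//	body.SetShard_ID([][]byte{shardA, shardB})
//	body.SetAsync(true)
//	body.SetIgnoreErrors(true)
//	// RestoreMode and Shrink stay false and add no bytes to StableSize.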
-func (x *SealWriteCacheRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		case "ignoreErrors":
-			{
-				var f bool
-				f = in.Bool()
-				x.IgnoreErrors = f
-			}
-		case "async":
-			{
-				var f bool
-				f = in.Bool()
-				x.Async = f
-			}
-		case "restoreMode":
-			{
-				var f bool
-				f = in.Bool()
-				x.RestoreMode = f
-			}
-		case "shrink":
-			{
-				var f bool
-				f = in.Bool()
-				x.Shrink = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type SealWriteCacheRequest struct {
-	Body      *SealWriteCacheRequest_Body `json:"body"`
-	Signature *Signature                  `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*SealWriteCacheRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest)(nil)
-	_ json.Marshaler            = (*SealWriteCacheRequest)(nil)
-	_ json.Unmarshaler          = (*SealWriteCacheRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SealWriteCacheRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SealWriteCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SealWriteCacheRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SealWriteCacheRequest) SetBody(v *SealWriteCacheRequest_Body) { - x.Body = v -} -func (x *SealWriteCacheRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SealWriteCacheRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SealWriteCacheRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *SealWriteCacheRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SealWriteCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SealWriteCacheRequest_Body - f = new(SealWriteCacheRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SealWriteCacheResponse_Body_Status struct { - Shard_ID []byte `json:"shardID"` - Success bool `json:"success"` - Error string `json:"error"` -} - -var ( - _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body_Status)(nil) - _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body_Status)(nil) - _ json.Marshaler = (*SealWriteCacheResponse_Body_Status)(nil) - _ json.Unmarshaler = (*SealWriteCacheResponse_Body_Status)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *SealWriteCacheResponse_Body_Status) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.Success) - size += proto.StringSize(3, x.Error) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SealWriteCacheResponse_Body_Status) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SealWriteCacheResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Shard_ID) != 0 { - mm.AppendBytes(1, x.Shard_ID) - } - if x.Success { - mm.AppendBool(2, x.Success) - } - if len(x.Error) != 0 { - mm.AppendString(3, x.Error) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SealWriteCacheResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body_Status") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = data - case 2: // Success - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Success") - } - x.Success = data - case 3: // Error - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Error") - } - x.Error = data - } - } - return nil -} -func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *SealWriteCacheResponse_Body_Status) SetShard_ID(v []byte) { - x.Shard_ID = v -} -func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} -func (x *SealWriteCacheResponse_Body_Status) SetSuccess(v bool) { - x.Success = v -} -func (x *SealWriteCacheResponse_Body_Status) GetError() string { - if x != nil { - return x.Error - } - return "" -} -func (x *SealWriteCacheResponse_Body_Status) SetError(v string) { - x.Error = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SealWriteCacheResponse_Body_Status) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - if x.Shard_ID != nil { - out.Base64Bytes(x.Shard_ID) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"success\":" - out.RawString(prefix) - out.Bool(x.Success) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"error\":" - out.RawString(prefix) - out.String(x.Error) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SealWriteCacheResponse_Body_Status) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Shard_ID = f - } - case "success": - { - var f bool - f = in.Bool() - x.Success = f - } - case "error": - { - var f string - f = in.String() - x.Error = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type SealWriteCacheResponse_Body struct { - Results []SealWriteCacheResponse_Body_Status `json:"results"` -} - -var ( - _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body)(nil) - _ json.Marshaler = (*SealWriteCacheResponse_Body)(nil) - _ json.Unmarshaler = (*SealWriteCacheResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *SealWriteCacheResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.Results { - size += proto.NestedStructureSizeUnchecked(1, &x.Results[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *SealWriteCacheResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for i := range x.Results { - x.Results[i].EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body") - } - switch fc.FieldNum { - case 1: // Results - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Results") - } - x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{}) - ff := &x.Results[len(x.Results)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status { - if x != nil { - return x.Results - } - return nil -} -func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) { - x.Results = v -} - -// MarshalJSON implements the json.Marshaler interface. 
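// Results is a slice of values rather than pointers, and UnmarshalProtobuf
// appends decoded statuses in place, so iterating per-shard outcomes needs no
// nil checks. A sketch (not part of the deleted file; resp stands for a
// *SealWriteCacheResponse received earlier, whose GetBody accessor appears
// further below):
//
//	results := resp.GetBody().GetResults()
//	for i := range results {
//		if !results[i].GetSuccess() {
//			// results[i].GetShard_ID() failed to seal;
//			// results[i].GetError() holds the cause.
//		}
//	}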
-func (x *SealWriteCacheResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"results\":"
-		out.RawString(prefix)
-		out.RawByte('[')
-		for i := range x.Results {
-			if i != 0 {
-				out.RawByte(',')
-			}
-			x.Results[i].MarshalEasyJSON(out)
-		}
-		out.RawByte(']')
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "results":
-			{
-				var f SealWriteCacheResponse_Body_Status
-				var list []SealWriteCacheResponse_Body_Status
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					f = SealWriteCacheResponse_Body_Status{}
-					f.UnmarshalEasyJSON(in)
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Results = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type SealWriteCacheResponse struct {
-	Body      *SealWriteCacheResponse_Body `json:"body"`
-	Signature *Signature                   `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*SealWriteCacheResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse)(nil)
-	_ json.Marshaler            = (*SealWriteCacheResponse)(nil)
-	_ json.Unmarshaler          = (*SealWriteCacheResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SealWriteCacheResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SealWriteCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *SealWriteCacheResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(SealWriteCacheResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *SealWriteCacheResponse) SetBody(v *SealWriteCacheResponse_Body) { - x.Body = v -} -func (x *SealWriteCacheResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *SealWriteCacheResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *SealWriteCacheResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *SealWriteCacheResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *SealWriteCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *SealWriteCacheResponse_Body - f = new(SealWriteCacheResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DetachShardsRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` -} - -var ( - _ encoding.ProtoMarshaler = (*DetachShardsRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DetachShardsRequest_Body)(nil) - _ json.Marshaler = (*DetachShardsRequest_Body)(nil) - _ json.Unmarshaler = (*DetachShardsRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DetachShardsRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *DetachShardsRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DetachShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DetachShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - } - } - return nil -} -func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *DetachShardsRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DetachShardsRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *DetachShardsRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type DetachShardsRequest struct {
-	Body      *DetachShardsRequest_Body `json:"body"`
-	Signature *Signature                `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*DetachShardsRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*DetachShardsRequest)(nil)
-	_ json.Marshaler            = (*DetachShardsRequest)(nil)
-	_ json.Unmarshaler          = (*DetachShardsRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DetachShardsRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DetachShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
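A sketch of the signing contract documented above: ReadSignedData returns the stable protobuf encoding of the body, which is the byte range a signature must cover. SHA-256 stands in for the node's actual signing machinery, which this file does not define; the import path is assumed.

package main

import (
	"crypto/sha256"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	req := new(control.DetachShardsRequest)
	body := new(control.DetachShardsRequest_Body)
	body.SetShard_ID([][]byte{[]byte("shard-1")}) // placeholder shard ID
	req.SetBody(body)

	// Equal field values always yield equal bytes here, so a signature
	// over this slice is reproducible on the verifying side.
	data, err := req.ReadSignedData(make([]byte, 0, req.SignedDataSize()))
	if err != nil {
		panic(err)
	}
	fmt.Printf("digest to sign: %x\n", sha256.Sum256(data))
}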
-func (x *DetachShardsRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DetachShardsRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DetachShardsRequest) SetBody(v *DetachShardsRequest_Body) { - x.Body = v -} -func (x *DetachShardsRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DetachShardsRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DetachShardsRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *DetachShardsRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DetachShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DetachShardsRequest_Body - f = new(DetachShardsRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type DetachShardsResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*DetachShardsResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*DetachShardsResponse_Body)(nil) - _ json.Marshaler = (*DetachShardsResponse_Body)(nil) - _ json.Unmarshaler = (*DetachShardsResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *DetachShardsResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
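A sketch of the JSON shape the shardID marshalers above produce: byte slices are emitted as base64 strings (import path assumed).

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	body := new(control.DetachShardsRequest_Body)
	body.SetShard_ID([][]byte{{0x01, 0x02}})

	js, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // expected: {"shardID":["AQI="]}

	var back control.DetachShardsRequest_Body
	if err := back.UnmarshalJSON(js); err != nil {
		panic(err)
	}
	fmt.Println(len(back.GetShard_ID())) // 1
}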
-func (x *DetachShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *DetachShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DetachShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse_Body")
-		}
-		switch fc.FieldNum {
-		}
-	}
-	return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DetachShardsResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DetachShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	out.RawByte('{')
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DetachShardsResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *DetachShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type DetachShardsResponse struct {
-	Body      *DetachShardsResponse_Body `json:"body"`
-	Signature *Signature                 `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*DetachShardsResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*DetachShardsResponse)(nil)
-	_ json.Marshaler            = (*DetachShardsResponse)(nil)
-	_ json.Unmarshaler          = (*DetachShardsResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DetachShardsResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DetachShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *DetachShardsResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(DetachShardsResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *DetachShardsResponse) SetBody(v *DetachShardsResponse_Body) { - x.Body = v -} -func (x *DetachShardsResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *DetachShardsResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *DetachShardsResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *DetachShardsResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *DetachShardsResponse_Body - f = new(DetachShardsResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardRebuildRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - TargetFillPercent uint32 `json:"targetFillPercent"` - ConcurrencyLimit uint32 `json:"concurrencyLimit"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil) - _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil) - _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardRebuildRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - size += proto.UInt32Size(2, x.TargetFillPercent) - size += proto.UInt32Size(3, x.ConcurrencyLimit) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } - if x.TargetFillPercent != 0 { - mm.AppendUint32(2, x.TargetFillPercent) - } - if x.ConcurrencyLimit != 0 { - mm.AppendUint32(3, x.ConcurrencyLimit) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
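A construction sketch for the rebuild parameters above. Values are placeholders and the import path is assumed; note that EmitProtobuf skips zero-valued scalars, so defaults cost nothing on the wire.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	body := new(control.StartShardRebuildRequest_Body)
	body.SetShard_ID([][]byte{[]byte("shard-1")}) // placeholder
	body.SetTargetFillPercent(90)
	body.SetConcurrencyLimit(4)

	// An all-default body marshals to zero bytes.
	fmt.Println(len(new(control.StartShardRebuildRequest_Body).MarshalProtobuf(nil))) // 0
	// StableSize predicts the exact encoded length.
	fmt.Println(body.StableSize() == len(body.MarshalProtobuf(nil))) // true
}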
-func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - case 2: // TargetFillPercent - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent") - } - x.TargetFillPercent = data - case 3: // ConcurrencyLimit - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit") - } - x.ConcurrencyLimit = data - } - } - return nil -} -func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} -func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 { - if x != nil { - return x.TargetFillPercent - } - return 0 -} -func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) { - x.TargetFillPercent = v -} -func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 { - if x != nil { - return x.ConcurrencyLimit - } - return 0 -} -func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) { - x.ConcurrencyLimit = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"targetFillPercent\":" - out.RawString(prefix) - out.Uint32(x.TargetFillPercent) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"concurrencyLimit\":" - out.RawString(prefix) - out.Uint32(x.ConcurrencyLimit) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
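A forward-compatibility sketch: the switch in UnmarshalProtobuf above silently skips field numbers it does not recognize. Both bodies below happen to carry repeated bytes in field 1, so the extra scalar field is simply ignored (import path assumed).

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	src := new(control.StartShardRebuildRequest_Body)
	src.SetShard_ID([][]byte{[]byte("shard-1")}) // placeholder
	src.SetTargetFillPercent(80)                 // field 2: unknown to the decoder below

	var dst control.DetachShardsRequest_Body
	if err := dst.UnmarshalProtobuf(src.MarshalProtobuf(nil)); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dst.GetShard_ID()[0]) // shard-1
}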
-func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		case "targetFillPercent":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.TargetFillPercent = f
-			}
-		case "concurrencyLimit":
-			{
-				var f uint32
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 32)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint32(v)
-				f = pv
-				x.ConcurrencyLimit = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type StartShardRebuildRequest struct {
-	Body      *StartShardRebuildRequest_Body `json:"body"`
-	Signature *Signature                     `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*StartShardRebuildRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil)
-	_ json.Marshaler            = (*StartShardRebuildRequest)(nil)
-	_ json.Unmarshaler          = (*StartShardRebuildRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *StartShardRebuildRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StartShardRebuildRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) { - x.Body = v -} -func (x *StartShardRebuildRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StartShardRebuildRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StartShardRebuildRequest_Body - f = new(StartShardRebuildRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardRebuildResponse_Body_Status struct { - Shard_ID []byte `json:"shardID"` - Success bool `json:"success"` - Error string `json:"error"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil) - _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil) - _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Shard_ID) - size += proto.BoolSize(2, x.Success) - size += proto.StringSize(3, x.Error) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Shard_ID) != 0 { - mm.AppendBytes(1, x.Shard_ID) - } - if x.Success { - mm.AppendBool(2, x.Success) - } - if len(x.Error) != 0 { - mm.AppendString(3, x.Error) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = data - case 2: // Success - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Success") - } - x.Success = data - case 3: // Error - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Error") - } - x.Error = data - } - } - return nil -} -func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) { - x.Shard_ID = v -} -func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} -func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) { - x.Success = v -} -func (x *StartShardRebuildResponse_Body_Status) GetError() string { - if x != nil { - return x.Error - } - return "" -} -func (x *StartShardRebuildResponse_Body_Status) SetError(v string) { - x.Error = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - if x.Shard_ID != nil { - out.Base64Bytes(x.Shard_ID) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"success\":" - out.RawString(prefix) - out.Bool(x.Success) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"error\":" - out.RawString(prefix) - out.String(x.Error) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Shard_ID = f - } - case "success": - { - var f bool - f = in.Bool() - x.Success = f - } - case "error": - { - var f string - f = in.String() - x.Error = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type StartShardRebuildResponse_Body struct { - Results []StartShardRebuildResponse_Body_Status `json:"results"` -} - -var ( - _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil) - _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil) - _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *StartShardRebuildResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.Results { - size += proto.NestedStructureSizeUnchecked(1, &x.Results[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for i := range x.Results { - x.Results[i].EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body") - } - switch fc.FieldNum { - case 1: // Results - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Results") - } - x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{}) - ff := &x.Results[len(x.Results)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status { - if x != nil { - return x.Results - } - return nil -} -func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) { - x.Results = v -} - -// MarshalJSON implements the json.Marshaler interface. 
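A consumption sketch for the per-shard statuses above; the shard IDs and error text are made-up sample data (import path assumed).

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	var body control.StartShardRebuildResponse_Body
	body.SetResults([]control.StartShardRebuildResponse_Body_Status{
		{Shard_ID: []byte("shard-1"), Success: true},
		{Shard_ID: []byte("shard-2"), Error: "sample failure"},
	})

	for _, st := range body.GetResults() {
		if st.GetSuccess() {
			fmt.Printf("%s: rebuild started\n", st.GetShard_ID())
			continue
		}
		fmt.Printf("%s: %s\n", st.GetShard_ID(), st.GetError())
	}
}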
-func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"results\":"
-		out.RawString(prefix)
-		out.RawByte('[')
-		for i := range x.Results {
-			if i != 0 {
-				out.RawByte(',')
-			}
-			x.Results[i].MarshalEasyJSON(out)
-		}
-		out.RawByte(']')
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "results":
-			{
-				var f StartShardRebuildResponse_Body_Status
-				var list []StartShardRebuildResponse_Body_Status
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					f = StartShardRebuildResponse_Body_Status{}
-					f.UnmarshalEasyJSON(in)
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Results = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type StartShardRebuildResponse struct {
-	Body      *StartShardRebuildResponse_Body `json:"body"`
-	Signature *Signature                      `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*StartShardRebuildResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil)
-	_ json.Marshaler            = (*StartShardRebuildResponse)(nil)
-	_ json.Unmarshaler          = (*StartShardRebuildResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *StartShardRebuildResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(StartShardRebuildResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) { - x.Body = v -} -func (x *StartShardRebuildResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *StartShardRebuildResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *StartShardRebuildResponse_Body - f = new(StartShardRebuildResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsForObjectRequest_Body struct { - ObjectId string `json:"objectId"` - ContainerId string `json:"containerId"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil) - _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil) - _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.ObjectId) - size += proto.StringSize(2, x.ContainerId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ObjectId) != 0 { - mm.AppendString(1, x.ObjectId) - } - if len(x.ContainerId) != 0 { - mm.AppendString(2, x.ContainerId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") - } - switch fc.FieldNum { - case 1: // ObjectId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ObjectId") - } - x.ObjectId = data - case 2: // ContainerId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - } - } - return nil -} -func (x *ListShardsForObjectRequest_Body) GetObjectId() string { - if x != nil { - return x.ObjectId - } - return "" -} -func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { - x.ObjectId = v -} -func (x *ListShardsForObjectRequest_Body) GetContainerId() string { - if x != nil { - return x.ContainerId - } - return "" -} -func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { - x.ContainerId = v -} - -// MarshalJSON implements the json.Marshaler interface. 
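Unlike the raw-bytes shard IDs elsewhere in this file, both identifiers above travel as strings. A construction sketch with placeholder values (import path assumed):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" // path assumed
)

func main() {
	body := new(control.ListShardsForObjectRequest_Body)
	body.SetObjectId("object-id-placeholder")
	body.SetContainerId("container-id-placeholder")

	// String fields use the plain proto string encoding in fields 1 and 2.
	fmt.Println(body.StableSize() == len(body.MarshalProtobuf(nil))) // true
}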
-func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"objectId\":"
-		out.RawString(prefix)
-		out.String(x.ObjectId)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"containerId\":"
-		out.RawString(prefix)
-		out.String(x.ContainerId)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "objectId":
-			{
-				var f string
-				f = in.String()
-				x.ObjectId = f
-			}
-		case "containerId":
-			{
-				var f string
-				f = in.String()
-				x.ContainerId = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type ListShardsForObjectRequest struct {
-	Body      *ListShardsForObjectRequest_Body `json:"body"`
-	Signature *Signature                       `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*ListShardsForObjectRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
-	_ json.Marshaler            = (*ListShardsForObjectRequest)(nil)
-	_ json.Unmarshaler          = (*ListShardsForObjectRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsForObjectRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { - x.Body = v -} -func (x *ListShardsForObjectRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsForObjectRequest_Body - f = new(ListShardsForObjectRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsForObjectResponse_Body struct { - Shard_ID [][]byte `json:"shardID"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) - _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) - _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - } - } - return nil -} -func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"shardID\":"
-		out.RawString(prefix)
-		out.RawByte('[')
-		for i := range x.Shard_ID {
-			if i != 0 {
-				out.RawByte(',')
-			}
-			if x.Shard_ID[i] != nil {
-				out.Base64Bytes(x.Shard_ID[i])
-			} else {
-				out.String("")
-			}
-		}
-		out.RawByte(']')
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "shardID":
-			{
-				var f []byte
-				var list [][]byte
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					{
-						tmp := in.Bytes()
-						if len(tmp) == 0 {
-							tmp = nil
-						}
-						f = tmp
-					}
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Shard_ID = list
-				in.Delim(']')
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type ListShardsForObjectResponse struct {
-	Body      *ListShardsForObjectResponse_Body `json:"body"`
-	Signature *Signature                        `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*ListShardsForObjectResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
-	_ json.Marshaler            = (*ListShardsForObjectResponse)(nil)
-	_ json.Unmarshaler          = (*ListShardsForObjectResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsForObjectResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { - x.Body = v -} -func (x *ListShardsForObjectResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsForObjectResponse_Body - f = new(ListShardsForObjectResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go deleted file mode 100644 index 045662ccf..000000000 --- a/pkg/services/control/service_grpc.pb.go +++ /dev/null @@ -1,975 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.0 -// source: pkg/services/control/service.proto - -package control - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck" - ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus" - ControlService_GetNetmapStatus_FullMethodName = "/control.ControlService/GetNetmapStatus" - ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects" - ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards" - ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode" - ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree" - ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation" - ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus" - ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus" - ControlService_StopShardEvacuation_FullMethodName = "/control.ControlService/StopShardEvacuation" - ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache" - ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor" - ControlService_AddChainLocalOverride_FullMethodName = "/control.ControlService/AddChainLocalOverride" - ControlService_GetChainLocalOverride_FullMethodName = "/control.ControlService/GetChainLocalOverride" - ControlService_ListChainLocalOverrides_FullMethodName = "/control.ControlService/ListChainLocalOverrides" - ControlService_RemoveChainLocalOverride_FullMethodName = "/control.ControlService/RemoveChainLocalOverride" - ControlService_RemoveChainLocalOverridesByTarget_FullMethodName = "/control.ControlService/RemoveChainLocalOverridesByTarget" - ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides" - 
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" - ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" - ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" - ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" -) - -// ControlServiceClient is the client API for ControlService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ControlServiceClient interface { - // Performs health check of the storage node. - HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Sets status of the storage node in FrostFS network map. - SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error) - // Gets status of the storage node in FrostFS network map. - GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error) - // Mark objects to be removed from node's local object storage. - DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) - // Returns list that contains information about all shards of a node. - ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error) - // Sets mode of the shard. - SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) - // Synchronizes all log operations for the specified tree. - SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) - // StartShardEvacuation starts moving all data from one shard to the others. - StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) - // GetShardEvacuationStatus returns evacuation status. - GetShardEvacuationStatus(ctx context.Context, in *GetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*GetShardEvacuationStatusResponse, error) - // ResetShardEvacuationStatus resets evacuation status if there is no running - // evacuation process. - ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error) - // StopShardEvacuation stops moving all data from one shard to the others. - StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error) - // FlushCache flushes objects from the write-cache to the main storage. - FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error) - // Doctor performs storage restructuring operations on the engine. - Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error) - // Add local access policy engine overrides to a node. - AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error) - // Get local access policy engine overrides stored in the node by chain id.
- GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error) - // List local access policy engine overrides stored in the node by container - // id. - ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error) - // Remove local access policy engine overrides stored in the node by chain - // id. - RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error) - // Remove local access policy engine overrides stored in the node by - // target. - RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) - // List targets of the local APE overrides stored in the node. - ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error) - // Flush objects from the write-cache and move it to degraded read-only mode. - SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error) - // DetachShards detaches and closes shards. - DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) - // StartShardRebuild starts the shard rebuild process. - StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) - // ListShardsForObject returns information about the shards where the object is stored. - ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) -} - -type controlServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient { - return &controlServiceClient{cc} -} - -func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error) { - out := new(SetNetmapStatusResponse) - err := c.cc.Invoke(ctx, ControlService_SetNetmapStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error) { - out := new(GetNetmapStatusResponse) - err := c.cc.Invoke(ctx, ControlService_GetNetmapStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) { - out := new(DropObjectsResponse) - err := c.cc.Invoke(ctx, ControlService_DropObjects_FullMethodName, in, out, opts...)
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error) { - out := new(ListShardsResponse) - err := c.cc.Invoke(ctx, ControlService_ListShards_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) { - out := new(SetShardModeResponse) - err := c.cc.Invoke(ctx, ControlService_SetShardMode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) { - out := new(SynchronizeTreeResponse) - err := c.cc.Invoke(ctx, ControlService_SynchronizeTree_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) { - out := new(StartShardEvacuationResponse) - err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) GetShardEvacuationStatus(ctx context.Context, in *GetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*GetShardEvacuationStatusResponse, error) { - out := new(GetShardEvacuationStatusResponse) - err := c.cc.Invoke(ctx, ControlService_GetShardEvacuationStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error) { - out := new(ResetShardEvacuationStatusResponse) - err := c.cc.Invoke(ctx, ControlService_ResetShardEvacuationStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error) { - out := new(StopShardEvacuationResponse) - err := c.cc.Invoke(ctx, ControlService_StopShardEvacuation_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error) { - out := new(FlushCacheResponse) - err := c.cc.Invoke(ctx, ControlService_FlushCache_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error) { - out := new(DoctorResponse) - err := c.cc.Invoke(ctx, ControlService_Doctor_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error) { - out := new(AddChainLocalOverrideResponse) - err := c.cc.Invoke(ctx, ControlService_AddChainLocalOverride_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error) { - out := new(GetChainLocalOverrideResponse) - err := c.cc.Invoke(ctx, ControlService_GetChainLocalOverride_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error) { - out := new(ListChainLocalOverridesResponse) - err := c.cc.Invoke(ctx, ControlService_ListChainLocalOverrides_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error) { - out := new(RemoveChainLocalOverrideResponse) - err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverride_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) { - out := new(RemoveChainLocalOverridesByTargetResponse) - err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverridesByTarget_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error) { - out := new(ListTargetsLocalOverridesResponse) - err := c.cc.Invoke(ctx, ControlService_ListTargetsLocalOverrides_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error) { - out := new(SealWriteCacheResponse) - err := c.cc.Invoke(ctx, ControlService_SealWriteCache_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) { - out := new(DetachShardsResponse) - err := c.cc.Invoke(ctx, ControlService_DetachShards_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) { - out := new(StartShardRebuildResponse) - err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { - out := new(ListShardsForObjectResponse) - err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ControlServiceServer is the server API for ControlService service. -// All implementations should embed UnimplementedControlServiceServer -// for forward compatibility -type ControlServiceServer interface { - // Performs health check of the storage node. - HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Sets status of the storage node in FrostFS network map. - SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error) - // Gets status of the storage node in FrostFS network map. - GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error) - // Mark objects to be removed from node's local object storage. - DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error) - // Returns list that contains information about all shards of a node. - ListShards(context.Context, *ListShardsRequest) (*ListShardsResponse, error) - // Sets mode of the shard. - SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) - // Synchronizes all log operations for the specified tree. - SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) - // StartShardEvacuation starts moving all data from one shard to the others. - StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) - // GetShardEvacuationStatus returns evacuation status. - GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error) - // ResetShardEvacuationStatus resets evacuation status if there is no running - // evacuation process. - ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse, error) - // StopShardEvacuation stops moving all data from one shard to the others. - StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error) - // FlushCache flushes objects from the write-cache to the main storage. - FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error) - // Doctor performs storage restructuring operations on the engine. - Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) - // Add local access policy engine overrides to a node. - AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error) - // Get local access policy engine overrides stored in the node by chain id. - GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error) - // List local access policy engine overrides stored in the node by container - // id. - ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error) - // Remove local access policy engine overrides stored in the node by chain - // id.
- RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error) - // Remove local access policy engine overrides stored in the node by - // target. - RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error) - // List targets of the local APE overrides stored in the node. - ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error) - // Flush objects from the write-cache and move it to degraded read-only mode. - SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error) - // DetachShards detaches and closes shards. - DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) - // StartShardRebuild starts the shard rebuild process. - StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) - // ListShardsForObject returns information about the shards where the object is stored. - ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) -} - -// UnimplementedControlServiceServer should be embedded to have forward compatible implementations. -type UnimplementedControlServiceServer struct { -} - -func (UnimplementedControlServiceServer) HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented") -} -func (UnimplementedControlServiceServer) SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetNetmapStatus not implemented") -} -func (UnimplementedControlServiceServer) GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNetmapStatus not implemented") -} -func (UnimplementedControlServiceServer) DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DropObjects not implemented") -} -func (UnimplementedControlServiceServer) ListShards(context.Context, *ListShardsRequest) (*ListShardsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListShards not implemented") -} -func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetShardMode not implemented") -} -func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented") -} -func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented") -} -func (UnimplementedControlServiceServer) GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetShardEvacuationStatus not implemented") -} -func (UnimplementedControlServiceServer) ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse,
error) { - return nil, status.Errorf(codes.Unimplemented, "method ResetShardEvacuationStatus not implemented") -} -func (UnimplementedControlServiceServer) StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StopShardEvacuation not implemented") -} -func (UnimplementedControlServiceServer) FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FlushCache not implemented") -} -func (UnimplementedControlServiceServer) Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Doctor not implemented") -} -func (UnimplementedControlServiceServer) AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddChainLocalOverride not implemented") -} -func (UnimplementedControlServiceServer) GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetChainLocalOverride not implemented") -} -func (UnimplementedControlServiceServer) ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListChainLocalOverrides not implemented") -} -func (UnimplementedControlServiceServer) RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverride not implemented") -} -func (UnimplementedControlServiceServer) RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverridesByTarget not implemented") -} -func (UnimplementedControlServiceServer) ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListTargetsLocalOverrides not implemented") -} -func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SealWriteCache not implemented") -} -func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented") -} -func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented") -} -func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented") -} - -// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ControlServiceServer will -// result in compilation errors. 
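The embedding contract above is what makes partial servers cheap: a type that embeds UnimplementedControlServiceServer satisfies ControlServiceServer while answering codes.Unimplemented for everything it does not override. A sketch under that assumption, not part of the deleted file (the type name and listen address are illustrative); the UnsafeControlServiceServer interface described in the preceding comment follows:

package main

import (
	"context"
	"net"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	"google.golang.org/grpc"
)

// healthOnlyServer overrides a single method; every other ControlService
// RPC falls through to the embedded stubs and returns codes.Unimplemented.
type healthOnlyServer struct {
	control.UnimplementedControlServiceServer
}

func (*healthOnlyServer) HealthCheck(_ context.Context, _ *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
	return new(control.HealthCheckResponse), nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:8090")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	control.RegisterControlServiceServer(srv, &healthOnlyServer{})
	_ = srv.Serve(lis)
}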
-type UnsafeControlServiceServer interface { - mustEmbedUnimplementedControlServiceServer() -} - -func RegisterControlServiceServer(s grpc.ServiceRegistrar, srv ControlServiceServer) { - s.RegisterService(&ControlService_ServiceDesc, srv) -} - -func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).HealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_HealthCheck_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNetmapStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).SetNetmapStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_SetNetmapStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).SetNetmapStatus(ctx, req.(*SetNetmapStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_GetNetmapStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNetmapStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).GetNetmapStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_GetNetmapStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).GetNetmapStatus(ctx, req.(*GetNetmapStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DropObjectsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).DropObjects(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_DropObjects_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).DropObjects(ctx, req.(*DropObjectsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_ListShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListShardsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ListShards(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ListShards_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ControlServiceServer).ListShards(ctx, req.(*ListShardsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetShardModeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).SetShardMode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_SetShardMode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).SetShardMode(ctx, req.(*SetShardModeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SynchronizeTreeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).SynchronizeTree(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_SynchronizeTree_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).SynchronizeTree(ctx, req.(*SynchronizeTreeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartShardEvacuationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).StartShardEvacuation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_StartShardEvacuation_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).StartShardEvacuation(ctx, req.(*StartShardEvacuationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_GetShardEvacuationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetShardEvacuationStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).GetShardEvacuationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_GetShardEvacuationStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).GetShardEvacuationStatus(ctx, req.(*GetShardEvacuationStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_ResetShardEvacuationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResetShardEvacuationStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ResetShardEvacuationStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, req.(*ResetShardEvacuationStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_StopShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StopShardEvacuationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).StopShardEvacuation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_StopShardEvacuation_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).StopShardEvacuation(ctx, req.(*StopShardEvacuationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FlushCacheRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).FlushCache(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_FlushCache_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).FlushCache(ctx, req.(*FlushCacheRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_Doctor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DoctorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).Doctor(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_Doctor_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).Doctor(ctx, req.(*DoctorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_AddChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddChainLocalOverrideRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).AddChainLocalOverride(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_AddChainLocalOverride_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).AddChainLocalOverride(ctx, req.(*AddChainLocalOverrideRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_GetChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetChainLocalOverrideRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).GetChainLocalOverride(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_GetChainLocalOverride_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ControlServiceServer).GetChainLocalOverride(ctx, req.(*GetChainLocalOverrideRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_ListChainLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListChainLocalOverridesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ListChainLocalOverrides_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, req.(*ListChainLocalOverridesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_RemoveChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveChainLocalOverrideRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_RemoveChainLocalOverride_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, req.(*RemoveChainLocalOverrideRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_RemoveChainLocalOverridesByTarget_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveChainLocalOverridesByTargetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_RemoveChainLocalOverridesByTarget_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, req.(*RemoveChainLocalOverridesByTargetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_ListTargetsLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTargetsLocalOverridesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ListTargetsLocalOverrides_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, req.(*ListTargetsLocalOverridesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_SealWriteCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SealWriteCacheRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).SealWriteCache(ctx, in) - } - info 
:= &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_SealWriteCache_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).SealWriteCache(ctx, req.(*SealWriteCacheRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DetachShardsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).DetachShards(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_DetachShards_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).DetachShards(ctx, req.(*DetachShardsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartShardRebuildRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).StartShardRebuild(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_StartShardRebuild_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListShardsForObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ListShardsForObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ListShardsForObject_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ControlService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "control.ControlService", - HandlerType: (*ControlServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "HealthCheck", - Handler: _ControlService_HealthCheck_Handler, - }, - { - MethodName: "SetNetmapStatus", - Handler: _ControlService_SetNetmapStatus_Handler, - }, - { - MethodName: "GetNetmapStatus", - Handler: _ControlService_GetNetmapStatus_Handler, - }, - { - MethodName: "DropObjects", - Handler: _ControlService_DropObjects_Handler, - }, - { - MethodName: "ListShards", - Handler: _ControlService_ListShards_Handler, - }, - { - MethodName: "SetShardMode", - Handler: _ControlService_SetShardMode_Handler, - }, - { - MethodName: "SynchronizeTree", - Handler: _ControlService_SynchronizeTree_Handler, - }, - { - MethodName: "StartShardEvacuation", - Handler: _ControlService_StartShardEvacuation_Handler, - }, - { - MethodName: "GetShardEvacuationStatus", - Handler: _ControlService_GetShardEvacuationStatus_Handler, - }, - { - MethodName: "ResetShardEvacuationStatus", - Handler: _ControlService_ResetShardEvacuationStatus_Handler, - }, - { - MethodName: "StopShardEvacuation", - Handler: _ControlService_StopShardEvacuation_Handler, - }, - { - MethodName: "FlushCache", - Handler: _ControlService_FlushCache_Handler, - }, - { - MethodName: "Doctor", - Handler: _ControlService_Doctor_Handler, - }, - { - MethodName: "AddChainLocalOverride", - Handler: _ControlService_AddChainLocalOverride_Handler, - }, - { - MethodName: "GetChainLocalOverride", - Handler: _ControlService_GetChainLocalOverride_Handler, - }, - { - MethodName: "ListChainLocalOverrides", - Handler: _ControlService_ListChainLocalOverrides_Handler, - }, - { - MethodName: "RemoveChainLocalOverride", - Handler: _ControlService_RemoveChainLocalOverride_Handler, - }, - { - MethodName: "RemoveChainLocalOverridesByTarget", - Handler: _ControlService_RemoveChainLocalOverridesByTarget_Handler, - }, - { - MethodName: "ListTargetsLocalOverrides", - Handler: _ControlService_ListTargetsLocalOverrides_Handler, - }, - { - MethodName: "SealWriteCache", - Handler: _ControlService_SealWriteCache_Handler, - }, - { - MethodName: "DetachShards", - Handler: _ControlService_DetachShards_Handler, - }, - { - MethodName: "StartShardRebuild", - Handler: _ControlService_StartShardRebuild_Handler, - }, - { - MethodName: "ListShardsForObject", - Handler: _ControlService_ListShardsForObject_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "pkg/services/control/service.proto", -} diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto deleted file mode 100644 index d8135ed64..000000000 --- a/pkg/services/control/types.proto +++ /dev/null @@ -1,194 +0,0 @@ -syntax = "proto3"; - -package control; - -option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"; - -// Signature of some message. -message Signature { - // Public key used for signing. - bytes key = 1 [ json_name = "key" ]; - - // Binary signature. - bytes sign = 2 [ json_name = "signature" ]; -} - -// Status of the storage node in the FrostFS network map. -enum NetmapStatus { - // Undefined status, default value. - STATUS_UNDEFINED = 0; - - // Node is online. - ONLINE = 1; - - // Node is offline. - OFFLINE = 2; - - // Node is maintained by the owner. - MAINTENANCE = 3; -} - -// FrostFS node description. 
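One detail worth noting in the Signature message above: the second field is named sign in the schema but carries json_name = "signature", so the generated Go struct field Sign serializes under the "signature" key. A sketch of the effect, not part of the deleted files (byte values illustrative):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	sig := &control.Signature{Key: []byte("pk"), Sign: []byte("sg")}
	b, err := sig.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// Bytes are base64-encoded and the wire key follows json_name:
	// {"key":"cGs=","signature":"c2c="}
	fmt.Println(string(b))
}

The NodeInfo message that the preceding comment introduces follows.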
-message NodeInfo { - // Public key of the FrostFS node in a binary format. - bytes public_key = 1 [ json_name = "publicKey" ]; - - // Ways to connect to a node. - repeated string addresses = 2 [ json_name = "addresses" ]; - - // Administrator-defined Attributes of the FrostFS Storage Node. - // - // `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8 - // string. Value can't be empty. - // - // Node's attributes are mostly used during Storage Policy evaluation to - // calculate object's placement and find a set of nodes satisfying policy - // requirements. There are some "well-known" node attributes common to all the - // Storage Nodes in the network and used implicitly with default values if not - // explicitly set: - // - // * Capacity \ - // Total available disk space in Gigabytes. - // * Price \ - // Price in GAS tokens for storing one GB of data during one Epoch. In node - // attributes it's a string representing a floating-point number with a comma - // or point delimiter for the decimal part. In the Network Map it will be - // saved as a 64-bit unsigned integer representing the number of minimal - // token fractions. - // * Locode \ - // Node's geographic location in - // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html) - // format approximated to the nearest point defined in the standard. - // * Country \ - // Country code in - // [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) - // format. Calculated automatically from the `Locode` attribute. - // * Region \ - // Country's administrative subdivision where the node is located. Calculated - // automatically from the `Locode` attribute based on the `SubDiv` field. - // Presented in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format. - // * City \ - // City, town, village or rural area name where the node is located, written - // without diacritics. Calculated automatically from the `Locode` attribute. - // - // For a detailed description of each well-known attribute, please see the - // corresponding section in the FrostFS Technical Specification. - message Attribute { - // Key of the node attribute. - string key = 1 [ json_name = "key" ]; - - // Value of the node attribute. - string value = 2 [ json_name = "value" ]; - - // Parent keys, if any. For example, for `City` it could be `Region` and - // `Country`. - repeated string parents = 3 [ json_name = "parents" ]; - } - // Carries a list of the FrostFS node attributes in a key-value form. Key name - // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo - // structures with duplicated attribute names or attributes with empty values - // will be considered invalid. - repeated Attribute attributes = 3 [ json_name = "attributes" ]; - - // Carries state of the FrostFS node. - NetmapStatus state = 4 [ json_name = "state" ]; -} - -// Network map structure. -message Netmap { - // Network map revision number. - uint64 epoch = 1 [ json_name = "epoch" ]; - - // Nodes present in the network. - repeated NodeInfo nodes = 2 [ json_name = "nodes" ]; -} - -// Health status of the storage node application. -enum HealthStatus { - // Undefined status, default value. - HEALTH_STATUS_UNDEFINED = 0; - - // Storage node application is starting. - STARTING = 1; - - // Storage node application is started and serves all services. - READY = 2; - - // Storage node application is shutting down. - SHUTTING_DOWN = 3; - - // Storage node application is reconfiguring. - RECONFIGURING = 4; -}
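These enum values travel through the API as their symbolic names, and the generated Go code further below (types_frostfs.pb.go) pairs each enum with String and FromString helpers. A small sketch of how they compose, not part of the deleted files (the printed values are illustrative):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
)

func main() {
	// String maps a value to its schema name...
	fmt.Println(control.HealthStatus_READY.String()) // READY

	// ...and FromString parses a name back, reporting success via bool.
	var hs control.HealthStatus
	if hs.FromString("SHUTTING_DOWN") {
		fmt.Println(int32(hs)) // 3
	}
}

The shard-related definitions continue below.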
- -// Shard description. -message ShardInfo { - // ID of the shard. - bytes shard_ID = 1 [ json_name = "shardID" ]; - - // Path to shard's metabase. - string metabase_path = 2 [ json_name = "metabasePath" ]; - - // Shard's blobstor info. - repeated BlobstorInfo blobstor = 3 [ json_name = "blobstor" ]; - - // Path to shard's write-cache, empty if disabled. - string writecache_path = 4 [ json_name = "writecachePath" ]; - - // Work mode of the shard. - ShardMode mode = 5; - - // Number of errors that have occurred. - uint32 errorCount = 6; - - // Path to shard's pilorama storage. - string pilorama_path = 7 [ json_name = "piloramaPath" ]; - - // Evacuation status. - bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ]; -} - -// Blobstor component description. -message BlobstorInfo { - // Path to the root. - string path = 1 [ json_name = "path" ]; - // Component type. - string type = 2 [ json_name = "type" ]; -} - -// Work mode of the shard. -enum ShardMode { - // Undefined mode, default value. - SHARD_MODE_UNDEFINED = 0; - - // Read-write. - READ_WRITE = 1; - - // Read-only. - READ_ONLY = 2; - - // Degraded. - DEGRADED = 3; - - // DegradedReadOnly. - DEGRADED_READ_ONLY = 4; -} - -// ChainTarget is an object to which local overrides -// are applied. -message ChainTarget { - enum TargetType { - UNDEFINED = 0; - - NAMESPACE = 1; - - CONTAINER = 2; - - USER = 3; - - GROUP = 4; - } - - TargetType type = 1; - - string Name = 2; -} diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go deleted file mode 100644 index 69d87292d..000000000 --- a/pkg/services/control/types_frostfs.pb.go +++ /dev/null @@ -1,1909 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. - -package control - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - strconv "strconv" -) - -type NetmapStatus int32 - -const ( - NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0 - NetmapStatus_ONLINE NetmapStatus = 1 - NetmapStatus_OFFLINE NetmapStatus = 2 - NetmapStatus_MAINTENANCE NetmapStatus = 3 -) - -var ( - NetmapStatus_name = map[int32]string{ - 0: "STATUS_UNDEFINED", - 1: "ONLINE", - 2: "OFFLINE", - 3: "MAINTENANCE", - } - NetmapStatus_value = map[string]int32{ - "STATUS_UNDEFINED": 0, - "ONLINE": 1, - "OFFLINE": 2, - "MAINTENANCE": 3, - } -) - -func (x NetmapStatus) String() string { - if v, ok := NetmapStatus_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *NetmapStatus) FromString(s string) bool { - if v, ok := NetmapStatus_value[s]; ok { - *x = NetmapStatus(v) - return true - } - return false -} - -type HealthStatus int32 - -const ( - HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0 - HealthStatus_STARTING HealthStatus = 1 - HealthStatus_READY HealthStatus = 2 - HealthStatus_SHUTTING_DOWN HealthStatus = 3 - HealthStatus_RECONFIGURING HealthStatus = 4 -) - -var ( - HealthStatus_name = map[int32]string{ - 0: "HEALTH_STATUS_UNDEFINED", - 1: "STARTING", - 2: "READY", - 3: "SHUTTING_DOWN", - 4: "RECONFIGURING", - } - HealthStatus_value = map[string]int32{ - "HEALTH_STATUS_UNDEFINED": 0, - "STARTING": 1, - "READY": 2, - "SHUTTING_DOWN": 3, - "RECONFIGURING": 4, - } -) - -func (x HealthStatus) String() string { - if v, ok :=
HealthStatus_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *HealthStatus) FromString(s string) bool { - if v, ok := HealthStatus_value[s]; ok { - *x = HealthStatus(v) - return true - } - return false -} - -type ShardMode int32 - -const ( - ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0 - ShardMode_READ_WRITE ShardMode = 1 - ShardMode_READ_ONLY ShardMode = 2 - ShardMode_DEGRADED ShardMode = 3 - ShardMode_DEGRADED_READ_ONLY ShardMode = 4 -) - -var ( - ShardMode_name = map[int32]string{ - 0: "SHARD_MODE_UNDEFINED", - 1: "READ_WRITE", - 2: "READ_ONLY", - 3: "DEGRADED", - 4: "DEGRADED_READ_ONLY", - } - ShardMode_value = map[string]int32{ - "SHARD_MODE_UNDEFINED": 0, - "READ_WRITE": 1, - "READ_ONLY": 2, - "DEGRADED": 3, - "DEGRADED_READ_ONLY": 4, - } -) - -func (x ShardMode) String() string { - if v, ok := ShardMode_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *ShardMode) FromString(s string) bool { - if v, ok := ShardMode_value[s]; ok { - *x = ShardMode(v) - return true - } - return false -} - -type Signature struct { - Key []byte `json:"key"` - Sign []byte `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*Signature)(nil) - _ encoding.ProtoUnmarshaler = (*Signature)(nil) - _ json.Marshaler = (*Signature)(nil) - _ json.Unmarshaler = (*Signature)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *Signature) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Key) - size += proto.BytesSize(2, x.Sign) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *Signature) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendBytes(1, x.Key) - } - if len(x.Sign) != 0 { - mm.AppendBytes(2, x.Sign) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "Signature") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Sign - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Sign") - } - x.Sign = data - } - } - return nil -} -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} -func (x *Signature) SetKey(v []byte) { - x.Key = v -} -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} -func (x *Signature) SetSign(v []byte) { - x.Sign = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *Signature) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"key\":" - out.RawString(prefix) - if x.Key != nil { - out.Base64Bytes(x.Key) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - if x.Sign != nil { - out.Base64Bytes(x.Sign) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *Signature) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "key": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Key = f - } - case "signature": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Sign = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type NodeInfo_Attribute struct { - Key string `json:"key"` - Value string `json:"value"` - Parents []string `json:"parents"` -} - -var ( - _ encoding.ProtoMarshaler = (*NodeInfo_Attribute)(nil) - _ encoding.ProtoUnmarshaler = (*NodeInfo_Attribute)(nil) - _ json.Marshaler = (*NodeInfo_Attribute)(nil) - _ json.Unmarshaler = (*NodeInfo_Attribute)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *NodeInfo_Attribute) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.Key) - size += proto.StringSize(2, x.Value) - size += proto.RepeatedStringSize(3, x.Parents) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *NodeInfo_Attribute) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *NodeInfo_Attribute) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendString(1, x.Key) - } - if len(x.Value) != 0 { - mm.AppendString(2, x.Value) - } - for j := range x.Parents { - mm.AppendString(3, x.Parents[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *NodeInfo_Attribute) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "NodeInfo_Attribute") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Value - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Value") - } - x.Value = data - case 3: // Parents - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Parents") - } - x.Parents = append(x.Parents, data) - } - } - return nil -} -func (x *NodeInfo_Attribute) GetKey() string { - if x != nil { - return x.Key - } - return "" -} -func (x *NodeInfo_Attribute) SetKey(v string) { - x.Key = v -} -func (x *NodeInfo_Attribute) GetValue() string { - if x != nil { - return x.Value - } - return "" -} -func (x *NodeInfo_Attribute) SetValue(v string) { - x.Value = v -} -func (x *NodeInfo_Attribute) GetParents() []string { - if x != nil { - return x.Parents - } - return nil -} -func (x *NodeInfo_Attribute) SetParents(v []string) { - x.Parents = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *NodeInfo_Attribute) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"key\":" - out.RawString(prefix) - out.String(x.Key) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"value\":" - out.RawString(prefix) - out.String(x.Value) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parents\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Parents { - if i != 0 { - out.RawByte(',') - } - out.String(x.Parents[i]) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *NodeInfo_Attribute) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "key": - { - var f string - f = in.String() - x.Key = f - } - case "value": - { - var f string - f = in.String() - x.Value = f - } - case "parents": - { - var f string - var list []string - in.Delim('[') - for !in.IsDelim(']') { - f = in.String() - list = append(list, f) - in.WantComma() - } - x.Parents = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type NodeInfo struct { - PublicKey []byte `json:"publicKey"` - Addresses []string `json:"addresses"` - Attributes []NodeInfo_Attribute `json:"attributes"` - State NetmapStatus `json:"state"` -} - -var ( - _ encoding.ProtoMarshaler = (*NodeInfo)(nil) - _ encoding.ProtoUnmarshaler = (*NodeInfo)(nil) - _ json.Marshaler = (*NodeInfo)(nil) - _ json.Unmarshaler = (*NodeInfo)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *NodeInfo) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.PublicKey) - size += proto.RepeatedStringSize(2, x.Addresses) - for i := range x.Attributes { - size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i]) - } - size += proto.EnumSize(4, int32(x.State)) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *NodeInfo) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.PublicKey) != 0 { - mm.AppendBytes(1, x.PublicKey) - } - for j := range x.Addresses { - mm.AppendString(2, x.Addresses[j]) - } - for i := range x.Attributes { - x.Attributes[i].EmitProtobuf(mm.AppendMessage(3)) - } - if int32(x.State) != 0 { - mm.AppendInt32(4, int32(x.State)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "NodeInfo") - } - switch fc.FieldNum { - case 1: // PublicKey - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "PublicKey") - } - x.PublicKey = data - case 2: // Addresses - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Addresses") - } - x.Addresses = append(x.Addresses, data) - case 3: // Attributes - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Attributes") - } - x.Attributes = append(x.Attributes, NodeInfo_Attribute{}) - ff := &x.Attributes[len(x.Attributes)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 4: // State - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "State") - } - x.State = NetmapStatus(data) - } - } - return nil -} -func (x *NodeInfo) GetPublicKey() []byte { - if x != nil { - return x.PublicKey - } - return nil -} -func (x *NodeInfo) SetPublicKey(v []byte) { - x.PublicKey = v -} -func (x *NodeInfo) GetAddresses() []string { - if x != nil { - return x.Addresses - } - return nil -} -func (x *NodeInfo) SetAddresses(v []string) { - x.Addresses = v -} -func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute { - if x != nil { - return x.Attributes - } - return nil -} -func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) { - x.Attributes = v -} -func (x *NodeInfo) GetState() NetmapStatus { - if x != nil { - return x.State - } - return 0 -} -func (x *NodeInfo) SetState(v NetmapStatus) { - x.State = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *NodeInfo) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"publicKey\":" - out.RawString(prefix) - if x.PublicKey != nil { - out.Base64Bytes(x.PublicKey) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"addresses\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Addresses { - if i != 0 { - out.RawByte(',') - } - out.String(x.Addresses[i]) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"attributes\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Attributes { - if i != 0 { - out.RawByte(',') - } - x.Attributes[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"state\":" - out.RawString(prefix) - v := int32(x.State) - if vv, ok := NetmapStatus_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
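A detail worth noting in the NodeInfo marshalers above: EmitProtobuf skips zero-valued fields, while MarshalEasyJSON writes every key, emitting publicKey as base64 and state as the NetmapStatus name when the value is known (and as a bare number otherwise). A short sketch, reusing the control import assumed in the previous example:

func nodeInfoJSONSketch() {
	ni := &control.NodeInfo{}
	ni.SetPublicKey([]byte{0x03, 0x01})
	ni.SetAddresses([]string{"/dns4/node1/tcp/8080"})
	ni.SetAttributes([]control.NodeInfo_Attribute{
		{Key: "UN-LOCODE", Value: "RU MOW"}, // sample attribute, not a required one
	})

	js, err := ni.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // every field is present, including the zero-valued "state"
}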
-func (x *NodeInfo) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "publicKey": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.PublicKey = f - } - case "addresses": - { - var f string - var list []string - in.Delim('[') - for !in.IsDelim(']') { - f = in.String() - list = append(list, f) - in.WantComma() - } - x.Addresses = list - in.Delim(']') - } - case "attributes": - { - var f NodeInfo_Attribute - var list []NodeInfo_Attribute - in.Delim('[') - for !in.IsDelim(']') { - f = NodeInfo_Attribute{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Attributes = list - in.Delim(']') - } - case "state": - { - var f NetmapStatus - var parsedValue NetmapStatus - switch v := in.Interface().(type) { - case string: - if vv, ok := NetmapStatus_value[v]; ok { - parsedValue = NetmapStatus(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = NetmapStatus(vv) - case float64: - parsedValue = NetmapStatus(v) - } - f = parsedValue - x.State = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type Netmap struct { - Epoch uint64 `json:"epoch"` - Nodes []NodeInfo `json:"nodes"` -} - -var ( - _ encoding.ProtoMarshaler = (*Netmap)(nil) - _ encoding.ProtoUnmarshaler = (*Netmap)(nil) - _ json.Marshaler = (*Netmap)(nil) - _ json.Unmarshaler = (*Netmap)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *Netmap) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt64Size(1, x.Epoch) - for i := range x.Nodes { - size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *Netmap) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Epoch != 0 { - mm.AppendUint64(1, x.Epoch) - } - for i := range x.Nodes { - x.Nodes[i].EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "Netmap") - } - switch fc.FieldNum { - case 1: // Epoch - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Epoch") - } - x.Epoch = data - case 2: // Nodes - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Nodes") - } - x.Nodes = append(x.Nodes, NodeInfo{}) - ff := &x.Nodes[len(x.Nodes)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *Netmap) GetEpoch() uint64 { - if x != nil { - return x.Epoch - } - return 0 -} -func (x *Netmap) SetEpoch(v uint64) { - x.Epoch = v -} -func (x *Netmap) GetNodes() []NodeInfo { - if x != nil { - return x.Nodes - } - return nil -} -func (x *Netmap) SetNodes(v []NodeInfo) { - x.Nodes = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *Netmap) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"epoch\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodes\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Nodes { - if i != 0 { - out.RawByte(',') - } - x.Nodes[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
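Note the deliberate choice in the Netmap JSON code above: Epoch is written as a quoted decimal string, so consumers that coerce JSON numbers to float64 cannot lose precision on large 64-bit values, and the unmarshaler that follows reads it back through JsonNumber, which accepts both quoted and bare numbers. A sketch under the same assumed import:

func netmapEpochSketch() {
	nm := &control.Netmap{}
	nm.SetEpoch(18446744073709551615) // max uint64, unsafe as a bare JSON number

	js, err := nm.MarshalJSON() // {"epoch":"18446744073709551615","nodes":[]}
	if err != nil {
		panic(err)
	}

	var back control.Netmap
	if err := back.UnmarshalJSON(js); err != nil {
		panic(err)
	}
	fmt.Println(back.GetEpoch() == nm.GetEpoch()) // true: no precision loss
}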
-func (x *Netmap) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "epoch": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.Epoch = f - } - case "nodes": - { - var f NodeInfo - var list []NodeInfo - in.Delim('[') - for !in.IsDelim(']') { - f = NodeInfo{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Nodes = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ShardInfo struct { - Shard_ID []byte `json:"shardID"` - MetabasePath string `json:"metabasePath"` - Blobstor []BlobstorInfo `json:"blobstor"` - WritecachePath string `json:"writecachePath"` - Mode ShardMode `json:"mode"` - ErrorCount uint32 `json:"errorCount"` - PiloramaPath string `json:"piloramaPath"` - EvacuationInProgress bool `json:"evacuationInProgress"` -} - -var ( - _ encoding.ProtoMarshaler = (*ShardInfo)(nil) - _ encoding.ProtoUnmarshaler = (*ShardInfo)(nil) - _ json.Marshaler = (*ShardInfo)(nil) - _ json.Unmarshaler = (*ShardInfo)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ShardInfo) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Shard_ID) - size += proto.StringSize(2, x.MetabasePath) - for i := range x.Blobstor { - size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i]) - } - size += proto.StringSize(4, x.WritecachePath) - size += proto.EnumSize(5, int32(x.Mode)) - size += proto.UInt32Size(6, x.ErrorCount) - size += proto.StringSize(7, x.PiloramaPath) - size += proto.BoolSize(8, x.EvacuationInProgress) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ShardInfo) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Shard_ID) != 0 { - mm.AppendBytes(1, x.Shard_ID) - } - if len(x.MetabasePath) != 0 { - mm.AppendString(2, x.MetabasePath) - } - for i := range x.Blobstor { - x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3)) - } - if len(x.WritecachePath) != 0 { - mm.AppendString(4, x.WritecachePath) - } - if int32(x.Mode) != 0 { - mm.AppendInt32(5, int32(x.Mode)) - } - if x.ErrorCount != 0 { - mm.AppendUint32(6, x.ErrorCount) - } - if len(x.PiloramaPath) != 0 { - mm.AppendString(7, x.PiloramaPath) - } - if x.EvacuationInProgress { - mm.AppendBool(8, x.EvacuationInProgress) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ShardInfo") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = data - case 2: // MetabasePath - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "MetabasePath") - } - x.MetabasePath = data - case 3: // Blobstor - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Blobstor") - } - x.Blobstor = append(x.Blobstor, BlobstorInfo{}) - ff := &x.Blobstor[len(x.Blobstor)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 4: // WritecachePath - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "WritecachePath") - } - x.WritecachePath = data - case 5: // Mode - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Mode") - } - x.Mode = ShardMode(data) - case 6: // ErrorCount - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ErrorCount") - } - x.ErrorCount = data - case 7: // PiloramaPath - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath") - } - x.PiloramaPath = data - case 8: // EvacuationInProgress - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress") - } - x.EvacuationInProgress = data - } - } - return nil -} -func (x *ShardInfo) GetShard_ID() []byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *ShardInfo) SetShard_ID(v []byte) { - x.Shard_ID = v -} -func (x *ShardInfo) GetMetabasePath() string { - if x != nil { - return x.MetabasePath - } - return "" -} -func (x *ShardInfo) SetMetabasePath(v string) { - x.MetabasePath = v -} -func (x *ShardInfo) GetBlobstor() []BlobstorInfo { - if x != nil { - return x.Blobstor - } - return nil -} -func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) { - x.Blobstor = v -} -func (x *ShardInfo) GetWritecachePath() string { - if x != nil { - return x.WritecachePath - } - return "" -} -func (x *ShardInfo) SetWritecachePath(v string) { - x.WritecachePath = v -} -func (x *ShardInfo) GetMode() ShardMode { - if x != nil { - return x.Mode - } - return 0 -} -func (x *ShardInfo) SetMode(v ShardMode) { - x.Mode = v -} -func (x *ShardInfo) GetErrorCount() uint32 { - if x != nil { - return x.ErrorCount - } - return 0 -} -func (x *ShardInfo) SetErrorCount(v uint32) { - x.ErrorCount = v -} -func (x *ShardInfo) GetPiloramaPath() string { - if x != nil { - return x.PiloramaPath - } - return "" -} -func (x *ShardInfo) SetPiloramaPath(v string) { - x.PiloramaPath = v -} -func (x *ShardInfo) GetEvacuationInProgress() bool { - if x != nil { - return x.EvacuationInProgress - } - return false -} -func (x *ShardInfo) SetEvacuationInProgress(v bool) { - x.EvacuationInProgress = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *ShardInfo) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - if x.Shard_ID != nil { - out.Base64Bytes(x.Shard_ID) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"metabasePath\":" - out.RawString(prefix) - out.String(x.MetabasePath) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"blobstor\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Blobstor { - if i != 0 { - out.RawByte(',') - } - x.Blobstor[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"writecachePath\":" - out.RawString(prefix) - out.String(x.WritecachePath) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"mode\":" - out.RawString(prefix) - v := int32(x.Mode) - if vv, ok := ShardMode_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"errorCount\":" - out.RawString(prefix) - out.Uint32(x.ErrorCount) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"piloramaPath\":" - out.RawString(prefix) - out.String(x.PiloramaPath) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"evacuationInProgress\":" - out.RawString(prefix) - out.Bool(x.EvacuationInProgress) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
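The same protobuf/JSON split applies to ShardInfo: the protobuf emitter above omits zero fields entirely, while the JSON marshaler writes all eight keys, with shardID base64-encoded and mode rendered as its ShardMode name. A sketch, same assumed import:

func shardInfoSketch() {
	si := &control.ShardInfo{}
	si.SetShard_ID([]byte{0x01, 0x02})
	si.SetMode(control.ShardMode_READ_WRITE)

	pb := si.MarshalProtobuf(nil) // only shardID and mode are encoded
	js, err := si.MarshalJSON()   // all keys, e.g. "mode":"READ_WRITE", empty paths as ""
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pb), string(js))
}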
-func (x *ShardInfo) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Shard_ID = f - } - case "metabasePath": - { - var f string - f = in.String() - x.MetabasePath = f - } - case "blobstor": - { - var f BlobstorInfo - var list []BlobstorInfo - in.Delim('[') - for !in.IsDelim(']') { - f = BlobstorInfo{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Blobstor = list - in.Delim(']') - } - case "writecachePath": - { - var f string - f = in.String() - x.WritecachePath = f - } - case "mode": - { - var f ShardMode - var parsedValue ShardMode - switch v := in.Interface().(type) { - case string: - if vv, ok := ShardMode_value[v]; ok { - parsedValue = ShardMode(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = ShardMode(vv) - case float64: - parsedValue = ShardMode(v) - } - f = parsedValue - x.Mode = f - } - case "errorCount": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.ErrorCount = f - } - case "piloramaPath": - { - var f string - f = in.String() - x.PiloramaPath = f - } - case "evacuationInProgress": - { - var f bool - f = in.Bool() - x.EvacuationInProgress = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type BlobstorInfo struct { - Path string `json:"path"` - Type string `json:"type"` -} - -var ( - _ encoding.ProtoMarshaler = (*BlobstorInfo)(nil) - _ encoding.ProtoUnmarshaler = (*BlobstorInfo)(nil) - _ json.Marshaler = (*BlobstorInfo)(nil) - _ json.Unmarshaler = (*BlobstorInfo)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *BlobstorInfo) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.Path) - size += proto.StringSize(2, x.Type) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *BlobstorInfo) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *BlobstorInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Path) != 0 { - mm.AppendString(1, x.Path) - } - if len(x.Type) != 0 { - mm.AppendString(2, x.Type) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *BlobstorInfo) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "BlobstorInfo") - } - switch fc.FieldNum { - case 1: // Path - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Path") - } - x.Path = data - case 2: // Type - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Type") - } - x.Type = data - } - } - return nil -} -func (x *BlobstorInfo) GetPath() string { - if x != nil { - return x.Path - } - return "" -} -func (x *BlobstorInfo) SetPath(v string) { - x.Path = v -} -func (x *BlobstorInfo) GetType() string { - if x != nil { - return x.Type - } - return "" -} -func (x *BlobstorInfo) SetType(v string) { - x.Type = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *BlobstorInfo) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"path\":" - out.RawString(prefix) - out.String(x.Path) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"type\":" - out.RawString(prefix) - out.String(x.Type) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *BlobstorInfo) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *BlobstorInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "path": - { - var f string - f = in.String() - x.Path = f - } - case "type": - { - var f string - f = in.String() - x.Type = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ChainTarget_TargetType int32 - -const ( - ChainTarget_UNDEFINED ChainTarget_TargetType = 0 - ChainTarget_NAMESPACE ChainTarget_TargetType = 1 - ChainTarget_CONTAINER ChainTarget_TargetType = 2 - ChainTarget_USER ChainTarget_TargetType = 3 - ChainTarget_GROUP ChainTarget_TargetType = 4 -) - -var ( - ChainTarget_TargetType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "NAMESPACE", - 2: "CONTAINER", - 3: "USER", - 4: "GROUP", - } - ChainTarget_TargetType_value = map[string]int32{ - "UNDEFINED": 0, - "NAMESPACE": 1, - "CONTAINER": 2, - "USER": 3, - "GROUP": 4, - } -) - -func (x ChainTarget_TargetType) String() string { - if v, ok := ChainTarget_TargetType_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *ChainTarget_TargetType) FromString(s string) bool { - if v, ok := ChainTarget_TargetType_value[s]; ok { - *x = ChainTarget_TargetType(v) - return true - } - return false -} - -type ChainTarget struct { - Type ChainTarget_TargetType `json:"type"` - Name string `json:"Name"` -} - -var ( - _ encoding.ProtoMarshaler = (*ChainTarget)(nil) - _ encoding.ProtoUnmarshaler = (*ChainTarget)(nil) - _ json.Marshaler = (*ChainTarget)(nil) - _ json.Unmarshaler = (*ChainTarget)(nil) -) - -// 
StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ChainTarget) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.Type)) - size += proto.StringSize(2, x.Name) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ChainTarget) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ChainTarget) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.Type) != 0 { - mm.AppendInt32(1, int32(x.Type)) - } - if len(x.Name) != 0 { - mm.AppendString(2, x.Name) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ChainTarget) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ChainTarget") - } - switch fc.FieldNum { - case 1: // Type - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Type") - } - x.Type = ChainTarget_TargetType(data) - case 2: // Name - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Name") - } - x.Name = data - } - } - return nil -} -func (x *ChainTarget) GetType() ChainTarget_TargetType { - if x != nil { - return x.Type - } - return 0 -} -func (x *ChainTarget) SetType(v ChainTarget_TargetType) { - x.Type = v -} -func (x *ChainTarget) GetName() string { - if x != nil { - return x.Name - } - return "" -} -func (x *ChainTarget) SetName(v string) { - x.Name = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ChainTarget) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"type\":" - out.RawString(prefix) - v := int32(x.Type) - if vv, ok := ChainTarget_TargetType_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"Name\":" - out.RawString(prefix) - out.String(x.Name) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
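ChainTarget follows the same enum conventions, and FromString gives a convenient way to parse user input into a target type; note that the JSON key for Name is capitalized, mirroring the struct tag above. A sketch, same assumed import:

func chainTargetSketch() {
	var tt control.ChainTarget_TargetType
	if !tt.FromString("CONTAINER") {
		panic("unknown target type")
	}

	ct := &control.ChainTarget{}
	ct.SetType(tt)
	ct.SetName("mycontainer") // hypothetical target name

	js, err := ct.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // {"type":"CONTAINER","Name":"mycontainer"}
}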
-func (x *ChainTarget) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ChainTarget) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "type": - { - var f ChainTarget_TargetType - var parsedValue ChainTarget_TargetType - switch v := in.Interface().(type) { - case string: - if vv, ok := ChainTarget_TargetType_value[v]; ok { - parsedValue = ChainTarget_TargetType(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = ChainTarget_TargetType(vv) - case float64: - parsedValue = ChainTarget_TargetType(v) - } - f = parsedValue - x.Type = f - } - case "Name": - { - var f string - f = in.String() - x.Name = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go deleted file mode 100644 index 1b92fdaad..000000000 --- a/pkg/services/netmap/executor.go +++ /dev/null @@ -1,135 +0,0 @@ -package netmap - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" -) - -type executorSvc struct { - version refs.Version - - state NodeState - - netInfo NetworkInfo - - respSvc *response.Service -} - -// NodeState encapsulates information -// about the current node state. -type NodeState interface { - // LocalNodeInfo must return the current node state - // in the FrostFS API v2 NodeInfo structure. - LocalNodeInfo() *netmapSDK.NodeInfo - - // ReadCurrentNetMap reads the current local network map of the storage node - // into the given parameter. Returns any error encountered which prevented - // the network map from being read. - ReadCurrentNetMap(*netmap.NetMap) error -} - -// NetworkInfo encapsulates a source of -// recent information about the FrostFS network. -type NetworkInfo interface { - // Dump must return recent network information in the FrostFS API v2 NetworkInfo structure. - // - // If the protocol version is <= 2.9, MillisecondsPerBlock and network config should be unset.
- Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error) -} - -func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { - // this should never happen, otherwise it's a programmer's bug - msg := "BUG: can't create netmap execution service" - assert.False(s == nil, msg, "node state is nil") - assert.False(netInfo == nil, msg, "network info is nil") - assert.False(respSvc == nil, msg, "response service is nil") - assert.True(version.IsValid(v), msg, "invalid version") - - res := &executorSvc{ - state: s, - netInfo: netInfo, - respSvc: respSvc, - } - - v.WriteToV2(&res.version) - - return res -} - -func (s *executorSvc) LocalNodeInfo( - _ context.Context, - _ *netmap.LocalNodeInfoRequest, -) (*netmap.LocalNodeInfoResponse, error) { - ni := s.state.LocalNodeInfo() - var nodeInfo netmap.NodeInfo - ni.WriteToV2(&nodeInfo) - - body := new(netmap.LocalNodeInfoResponseBody) - body.SetVersion(&s.version) - body.SetNodeInfo(&nodeInfo) - - resp := new(netmap.LocalNodeInfoResponse) - resp.SetBody(body) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) NetworkInfo( - ctx context.Context, - req *netmap.NetworkInfoRequest, -) (*netmap.NetworkInfoResponse, error) { - verV2 := req.GetMetaHeader().GetVersion() - if verV2 == nil { - return nil, errors.New("missing protocol version in meta header") - } - - var ver versionsdk.Version - if err := ver.ReadFromV2(*verV2); err != nil { - return nil, fmt.Errorf("can't read version: %w", err) - } - - ni, err := s.netInfo.Dump(ctx, ver) - if err != nil { - return nil, err - } - - var niV2 netmap.NetworkInfo - ni.WriteToV2(&niV2) - - body := new(netmap.NetworkInfoResponseBody) - body.SetNetworkInfo(&niV2) - - resp := new(netmap.NetworkInfoResponse) - resp.SetBody(body) - - s.respSvc.SetMeta(resp) - return resp, nil -} - -func (s *executorSvc) Snapshot(_ context.Context, _ *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) { - var nm netmap.NetMap - - err := s.state.ReadCurrentNetMap(&nm) - if err != nil { - return nil, fmt.Errorf("read current local network map: %w", err) - } - - body := new(netmap.SnapshotResponseBody) - body.SetNetMap(&nm) - - resp := new(netmap.SnapshotResponse) - resp.SetBody(body) - - s.respSvc.SetMeta(resp) - return resp, nil -} diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go deleted file mode 100644 index eff880dbe..000000000 --- a/pkg/services/netmap/server.go +++ /dev/null @@ -1,14 +0,0 @@ -package netmap - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" -) - -// Server is an interface of the FrostFS API Netmap service server. 
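The execution service above is meant to be wrapped by the signing decorator from sign.go (deleted below), so requests are verified and responses signed at the outer layer. A minimal assembly sketch, assuming this package is imported as netmapsvc and that state, netInfo and respSvc are constructed elsewhere in the node:

func newNetmapServer(
	key *ecdsa.PrivateKey,
	state netmapsvc.NodeState,
	ver versionsdk.Version,
	netInfo netmapsvc.NetworkInfo,
	respSvc *response.Service,
) netmapsvc.Server {
	// NewExecutionService answers the requests; NewSignService verifies each
	// request signature and signs every response before it leaves the node.
	exec := netmapsvc.NewExecutionService(state, ver, netInfo, respSvc)
	return netmapsvc.NewSignService(key, exec)
}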
-type Server interface { - LocalNodeInfo(context.Context, *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) - NetworkInfo(context.Context, *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) - Snapshot(context.Context, *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) -} diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go deleted file mode 100644 index 5f184d5c0..000000000 --- a/pkg/services/netmap/sign.go +++ /dev/null @@ -1,52 +0,0 @@ -package netmap - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" -) - -type signService struct { - sigSvc *util.SignService - - svc Server -} - -func NewSignService(key *ecdsa.PrivateKey, svc Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *signService) LocalNodeInfo( - ctx context.Context, - req *netmap.LocalNodeInfoRequest, -) (*netmap.LocalNodeInfoResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(netmap.LocalNodeInfoResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.LocalNodeInfo(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) NetworkInfo(ctx context.Context, req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(netmap.NetworkInfoResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.NetworkInfo(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *signService) Snapshot(ctx context.Context, req *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(netmap.SnapshotResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Snapshot(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go deleted file mode 100644 index bb6067a37..000000000 --- a/pkg/services/object/ape/checker.go +++ /dev/null @@ -1,114 +0,0 @@ -package ape - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type checkerImpl struct { - checkerCore checkercore.CheckCore - frostFSIDClient frostfsidcore.SubjectProvider - headerProvider HeaderProvider - nm netmap.Source - cnrSource container.Source - nodePK []byte -} - -func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage 
policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker { - return &checkerImpl{ - checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st), - frostFSIDClient: frostFSIDClient, - headerProvider: headerProvider, - nm: nm, - cnrSource: cnrSource, - nodePK: nodePK, - } -} - -type Prm struct { - Namespace string - - Container cid.ID - - // Object ID is omitted for some methods. - Object *oid.ID - - // If Header is set, then object attributes and properties will be parsed from - // a request/response's header. - Header *objectV2.Header - - // Method must be represented only as a constant defined in the native schema. - Method string - - // Role must be represented only as a constant defined in the native schema. - Role string - - // An encoded sender's public key string. - SenderKey string - - // An encoded container's owner user ID. - ContainerOwner user.ID - - // Attributes defined for the container. - ContainerAttributes map[string]string - - // The request's bearer token. It is used to check APE overrides with the token. - BearerToken *bearer.Token - - // XHeaders from the request. - XHeaders []session.XHeader -} - -var errMissingOID = errors.New("object ID is not set") - -// CheckAPE prepares an APE-request and checks if it is permitted by policies. -func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { - // APE check is ignored for some inter-node requests. - switch prm.Role { - case nativeschema.PropertyValueContainerRoleContainer: - return nil - case nativeschema.PropertyValueContainerRoleIR: - switch prm.Method { - case nativeschema.MethodGetObject, - nativeschema.MethodHeadObject, - nativeschema.MethodSearchObject, - nativeschema.MethodRangeObject, - nativeschema.MethodHashObject: - return nil - default: - } - } - - r, err := c.newAPERequest(ctx, prm) - if err != nil { - return fmt.Errorf("failed to create ape request: %w", err) - } - pub, err := keys.NewPublicKeyFromString(prm.SenderKey) - if err != nil { - return err - } - - return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{ - Request: r, - PublicKey: pub, - Namespace: prm.Namespace, - Container: prm.Container, - ContainerOwner: prm.ContainerOwner, - BearerToken: prm.BearerToken, - }) -} diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go deleted file mode 100644 index 97eb2b2d7..000000000 --- a/pkg/services/object/ape/checker_test.go +++ /dev/null @@ -1,782 +0,0 @@ -package ape - -import ( - "context" - "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" - commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -type headerProviderMock struct { - m map[oid.Address]*objectSDK.Object -} - -var _ HeaderProvider = (*headerProviderMock)(nil) - -func (h *headerProviderMock) addHeader(c cid.ID, o oid.ID, header *objectSDK.Object) { - var addr oid.Address - addr.SetContainer(c) - addr.SetObject(o) - h.m[addr] = header -} - -func (h *headerProviderMock) GetHeader(_ context.Context, c cid.ID, o oid.ID, _ bool) (*objectSDK.Object, error) { - var addr oid.Address - addr.SetContainer(c) - addr.SetObject(o) - obj, ok := h.m[addr] - if !ok { - return nil, fmt.Errorf("address not found") - } - return obj, nil -} - -func newHeaderProviderMock() *headerProviderMock { - return &headerProviderMock{ - m: make(map[oid.Address]*objectSDK.Object), - } -} - -func newContainerIDSDK(t *testing.T, encodedCID string) cid.ID { - var cnr cid.ID - require.NoError(t, cnr.DecodeString(encodedCID)) - return cnr -} - -func newObjectIDSDK(t *testing.T, encodedOID *string) *oid.ID { - if encodedOID == nil { - return nil - } - obj := new(oid.ID) - require.NoError(t, obj.DecodeString(*encodedOID)) - return obj -} - -type headerObjectSDKParams struct { - majorVersion, minorVersion uint32 - owner user.ID - epoch uint64 - payloadSize uint64 - typ objectSDK.Type - payloadChecksum checksum.Checksum - payloadHomomorphicHash checksum.Checksum - attributes []struct { - key string - val string - } -} - -func stringPtr(s string) *string { - return &s -} - -func newHeaderObjectSDK(cnr cid.ID, oid *oid.ID, headerObjSDK *headerObjectSDKParams) *objectSDK.Object { - objSDK := objectSDK.New() - objSDK.SetContainerID(cnr) - if oid != nil { - objSDK.SetID(*oid) - } - if headerObjSDK == nil { - return objSDK - } - ver := new(version.Version) - ver.SetMajor(headerObjSDK.majorVersion) - ver.SetMinor(headerObjSDK.minorVersion) - objSDK.SetVersion(ver) - objSDK.SetCreationEpoch(headerObjSDK.epoch) - objSDK.SetOwnerID(headerObjSDK.owner) - objSDK.SetPayloadSize(headerObjSDK.payloadSize) - objSDK.SetType(headerObjSDK.typ) - objSDK.SetPayloadChecksum(headerObjSDK.payloadChecksum) - objSDK.SetPayloadHomomorphicHash(headerObjSDK.payloadHomomorphicHash) - - var attrs []objectSDK.Attribute - for _, attr := range headerObjSDK.attributes { - attrSDK := objectSDK.NewAttribute() - attrSDK.SetKey(attr.key) - attrSDK.SetValue(attr.val) - attrs = append(attrs, *attrSDK) - } - objSDK.SetAttributes(attrs...) - - return objSDK -} - -type testHeader struct { - headerObjSDK *headerObjectSDKParams - - // If fromHeaderProvider is set, then running test should - // consider that a header is recieved from a header provider. - fromHeaderProvider bool - - // If fromHeaderProvider is set, then running test should - // consider that a header is recieved from a message header. 
- fromRequestResponseHeader bool -} - -var ( - methodsRequiredOID = []string{ - nativeschema.MethodGetObject, - nativeschema.MethodHeadObject, - nativeschema.MethodRangeObject, - nativeschema.MethodHashObject, - nativeschema.MethodDeleteObject, - } - - methodsOptionalOID = []string{ - nativeschema.MethodSearchObject, nativeschema.MethodPutObject, - } - - namespace = "test_namespace" - - containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy" - - objectID = "BzQw5HH3feoxFDD5tCT87Y1726qzgLfxEE7wgtoRzB3R" - - groupID = "1" - - role = "Container" - - senderPrivateKey, _ = keys.NewPrivateKey() - - senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes()) -) - -type frostfsIDProviderMock struct { - subjects map[util.Uint160]*client.Subject - subjectsExtended map[util.Uint160]*client.SubjectExtended -} - -var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil) - -func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock { - return &frostfsIDProviderMock{ - subjects: map[util.Uint160]*client.Subject{ - scriptHashFromSenderKey(t, senderKey): { - Namespace: "testnamespace", - Name: "test", - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExtended: map[util.Uint160]*client.SubjectExtended{ - scriptHashFromSenderKey(t, senderKey): { - Namespace: "testnamespace", - Name: "test", - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 1, - Name: "test", - Namespace: "testnamespace", - KV: map[string]string{ - "attr1": "value1", - "attr2": "value2", - }, - }, - }, - }, - }, - } -} - -func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { - pk, err := keys.NewPublicKeyFromString(senderKey) - require.NoError(t, err) - return pk.GetScriptHash() -} - -func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { - v, ok := f.subjects[key] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return v, nil -} - -func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { - v, ok := f.subjectsExtended[key] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return v, nil -} - -var apeCheckTestCases = []struct { - name string - container string - object *string - methods []string - header testHeader - xHeaders []session.XHeader - containerRules []chain.Rule - groupidRules []chain.Rule - expectAPEErr bool -}{ - { - name: "oid required requests are allowed", - container: containerID, - object: stringPtr(objectID), - methods: methodsRequiredOID, - containerRules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{Names: methodsRequiredOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)}, - }, - }, - }, - }, - { - name: "oid optional requests are allowed", - container: containerID, - methods: methodsOptionalOID, - containerRules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{Names: methodsOptionalOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - }, - }, - }, - { - name: "oid required requests are denied", - container: containerID, - object: stringPtr(objectID), - methods: methodsRequiredOID, - containerRules: []chain.Rule{ - { - Status: chain.AccessDenied, - 
Actions: chain.Actions{Names: methodsRequiredOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)}, - }, - }, - }, - expectAPEErr: true, - }, - { - name: "oid required requests are denied by an attribute", - container: containerID, - object: stringPtr(objectID), - methods: methodsRequiredOID, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - attributes: []struct { - key string - val string - }{ - { - key: "attr1", - val: "attribute_value", - }, - }, - }, - fromHeaderProvider: true, - }, - containerRules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: methodsRequiredOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)}, - }, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringLike, - Kind: chain.KindResource, - Key: "attr1", - Value: "attribute*", - }, - }, - }, - }, - expectAPEErr: true, - }, - { - name: "oid required requests are denied by sender", - container: containerID, - object: stringPtr(objectID), - methods: methodsRequiredOID, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - attributes: []struct { - key string - val string - }{ - { - key: "attr1", - val: "attribute_value", - }, - }, - }, - fromHeaderProvider: true, - }, - containerRules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: methodsRequiredOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)}, - }, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringLike, - Kind: chain.KindRequest, - Key: nativeschema.PropertyKeyActorPublicKey, - Value: senderKey, - }, - }, - }, - }, - expectAPEErr: true, - }, - { - name: "oid required requests are denied by xheader", - container: containerID, - object: stringPtr(objectID), - methods: methodsRequiredOID, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - attributes: []struct { - key string - val string - }{ - { - key: "attr1", - val: "attribute_value", - }, - }, - }, - fromHeaderProvider: true, - }, - xHeaders: []session.XHeader{ - func() (xhead session.XHeader) { - xhead.SetKey("X-Test-ID") - xhead.SetValue("aezakmi") - return - }(), - }, - containerRules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: methodsRequiredOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)}, - }, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringLike, - Kind: chain.KindRequest, - Key: fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, "X-Test-ID"), - Value: "aezakmi", - }, - }, - }, - }, - expectAPEErr: true, - }, - { - name: "optional oid requests reached quota limit by an attribute", - container: containerID, - methods: methodsOptionalOID, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - payloadSize: 1000, - }, - fromRequestResponseHeader: true, - }, - containerRules: []chain.Rule{ - { - Status: chain.QuotaLimitReached, - Actions: chain.Actions{Names: methodsOptionalOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindResource, - Key: nativeschema.PropertyKeyObjectPayloadLength, - Value: 
"1000", - }, - }, - }, - }, - expectAPEErr: true, - }, - { - name: "optional oid requests reached quota limit by group-id", - container: containerID, - methods: methodsOptionalOID, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - payloadSize: 1000, - }, - fromRequestResponseHeader: true, - }, - groupidRules: []chain.Rule{ - { - Status: chain.QuotaLimitReached, - Actions: chain.Actions{Names: methodsOptionalOID}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: commonschema.PropertyKeyFrostFSIDGroupID, - Value: groupID, - }, - }, - }, - }, - expectAPEErr: true, - }, -} - -type stMock struct{} - -func (m *stMock) CurrentEpoch() uint64 { - return 8 -} - -func TestAPECheck_BearerTokenOverrides(t *testing.T) { - for _, test := range apeCheckTestCases { - t.Run(test.name, func(t *testing.T) { - chain := chain.Chain{ - Rules: test.containerRules, - MatchType: chain.MatchTypeFirstMatch, - } - chainSDK := apeSDK.Chain{ - Raw: chain.Bytes(), - } - bt := new(bearer.Token) - bt.SetIat(1) - bt.SetExp(10) - bt.SetAPEOverride(bearer.APEOverride{ - Target: apeSDK.ChainTarget{ - TargetType: apeSDK.TargetTypeContainer, - Name: test.container, - }, - Chains: []apeSDK.Chain{chainSDK}, - }) - bt.Sign(senderPrivateKey.PrivateKey) - var cnrOwner user.ID - user.IDFromKey(&cnrOwner, (ecdsa.PublicKey)(*senderPrivateKey.PublicKey())) - - for _, method := range test.methods { - t.Run(method, func(t *testing.T) { - headerProvider := newHeaderProviderMock() - frostfsidProvider := newFrostfsIDProviderMock(t) - - cnr := newContainerIDSDK(t, test.container) - obj := newObjectIDSDK(t, test.object) - - ls := inmemory.NewInmemoryLocalStorage() - ms := inmemory.NewInmemoryMorphRuleChainStorage() - - checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil) - - prm := Prm{ - Method: method, - Container: cnr, - Object: obj, - Role: role, - ContainerOwner: cnrOwner, - SenderKey: senderKey, - BearerToken: bt, - } - - var headerObjSDK *objectSDK.Object - if test.header.headerObjSDK != nil { - headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK) - if test.header.fromHeaderProvider { - require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider") - headerProvider.addHeader(cnr, *obj, headerObjSDK) - } else if test.header.fromRequestResponseHeader { - prm.Header = headerObjSDK.ToV2().GetHeader() - } - } - - err := checker.CheckAPE(context.Background(), prm) - if test.expectAPEErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } - }) - } -} - -func TestAPECheck(t *testing.T) { - for _, test := range apeCheckTestCases { - t.Run(test.name, func(t *testing.T) { - for _, method := range test.methods { - t.Run(method, func(t *testing.T) { - headerProvider := newHeaderProviderMock() - frostfsidProvider := newFrostfsIDProviderMock(t) - - cnr := newContainerIDSDK(t, test.container) - obj := newObjectIDSDK(t, test.object) - - ls := inmemory.NewInmemoryLocalStorage() - ms := inmemory.NewInmemoryMorphRuleChainStorage() - - if len(test.containerRules) > 0 { - ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(test.container), &chain.Chain{ - Rules: test.containerRules, - MatchType: chain.MatchTypeFirstMatch, - }) - } - - if len(test.groupidRules) > 0 { - ls.AddOverride(chain.Ingress, policyengine.GroupTarget(":"+groupID), 
&chain.Chain{
-                    Rules:     test.groupidRules,
-                    MatchType: chain.MatchTypeFirstMatch,
-                })
-            }
-
-            checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
-
-            prm := Prm{
-                Method:    method,
-                Container: cnr,
-                Object:    obj,
-                Role:      role,
-                SenderKey: senderKey,
-            }
-
-            var headerObjSDK *objectSDK.Object
-            if test.header.headerObjSDK != nil {
-                headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
-                if test.header.fromHeaderProvider {
-                    require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
-                    headerProvider.addHeader(cnr, *obj, headerObjSDK)
-                } else if test.header.fromRequestResponseHeader {
-                    prm.Header = headerObjSDK.ToV2().GetHeader()
-                }
-            }
-
-            err := checker.CheckAPE(context.Background(), prm)
-            if test.expectAPEErr {
-                require.Error(t, err)
-            } else {
-                require.NoError(t, err)
-            }
-        })
-    }
-    })
-    }
-}
-
-type netmapStub struct {
-    netmaps      map[uint64]*netmapSDK.NetMap
-    currentEpoch uint64
-}
-
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
-    if diff >= s.currentEpoch {
-        return nil, errors.New("invalid diff")
-    }
-    return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
-}
-
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
-    if nm, found := s.netmaps[epoch]; found {
-        return nm, nil
-    }
-    return nil, errors.New("netmap not found")
-}
-
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
-    return s.currentEpoch, nil
-}
-
-type testContainerSource struct {
-    containers map[cid.ID]*container.Container
-}
-
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
-    if cnr, found := s.containers[cnrID]; found {
-        return cnr, nil
-    }
-    return nil, fmt.Errorf("container not found")
-}
-
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
-    return nil, nil
-}
-
-func TestGetECChunk(t *testing.T) {
-    headerProvider := newHeaderProviderMock()
-    frostfsidProvider := newFrostfsIDProviderMock(t)
-
-    cnr := newContainerIDSDK(t, containerID)
-    obj := newObjectIDSDK(t, &objectID)
-
-    ls := inmemory.NewInmemoryLocalStorage()
-    ms := inmemory.NewInmemoryMorphRuleChainStorage()
-
-    ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(containerID), &chain.Chain{
-        Rules: []chain.Rule{
-            {
-                Status:  chain.AccessDenied,
-                Actions: chain.Actions{Names: methodsRequiredOID},
-                Resources: chain.Resources{
-                    Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                },
-                Condition: []chain.Condition{
-                    {
-                        Op:    chain.CondStringEquals,
-                        Kind:  chain.KindResource,
-                        Key:   "attr1",
-                        Value: "value",
-                    },
-                },
-            },
-            {
-                Status:  chain.Allow,
-                Actions: chain.Actions{Names: methodsRequiredOID},
-                Resources: chain.Resources{
-                    Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                },
-            },
-        },
-    })
-
-    node1Key, err := keys.NewPrivateKey()
-    require.NoError(t, err)
-    node1 := netmapSDK.NodeInfo{}
-    node1.SetPublicKey(node1Key.PublicKey().Bytes())
-    node2Key, err := keys.NewPrivateKey()
-    require.NoError(t, err)
-    node2 := netmapSDK.NodeInfo{}
-    node2.SetPublicKey(node2Key.PublicKey().Bytes())
-    netmap := &netmapSDK.NetMap{}
-    netmap.SetEpoch(100)
-    netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2})
-
-    nm := &netmapStub{
-        currentEpoch: 100,
-        netmaps: map[uint64]*netmapSDK.NetMap{
-            99:  netmap,
-            100: netmap,
-        },
-    }
-
-    cont :=
containerSDK.Container{} - cont.Init() - pp := netmapSDK.PlacementPolicy{} - require.NoError(t, pp.DecodeString("EC 1.1")) - cont.SetPlacementPolicy(pp) - cs := &testContainerSource{ - containers: map[cid.ID]*container.Container{ - cnr: { - Value: cont, - }, - }, - } - - checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, node1Key.PublicKey().Bytes()) - - ecParentID := oidtest.ID() - chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader() - ecHeader := object.ECHeader{ - Index: 1, - Total: 2, - Parent: &refs.ObjectID{}, - } - chunkHeader.SetEC(&ecHeader) - ecParentID.WriteToV2(ecHeader.Parent) - - parentHeader := newHeaderObjectSDK(cnr, &ecParentID, &headerObjectSDKParams{ - attributes: []struct { - key string - val string - }{ - { - key: "attr1", - val: "value", - }, - }, - }) - headerProvider.addHeader(cnr, ecParentID, parentHeader) - - // container node requests EC parent headers, so container node denies access by matching attribute key/value - t.Run("access denied on container node", func(t *testing.T) { - prm := Prm{ - Method: nativeschema.MethodGetObject, - Container: cnr, - Object: obj, - Role: role, - SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()), - Header: chunkHeader, - } - - err = checker.CheckAPE(context.Background(), prm) - require.Error(t, err) - }) - - // non container node has no access rights to collect EC parent header, so it uses EC chunk headers - t.Run("access allowed on non container node", func(t *testing.T) { - otherKey, err := keys.NewPrivateKey() - require.NoError(t, err) - checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes()) - prm := Prm{ - Method: nativeschema.MethodGetObject, - Container: cnr, - Object: obj, - Role: nativeschema.PropertyValueContainerRoleOthers, - SenderKey: senderKey, - Header: chunkHeader, - } - - err = checker.CheckAPE(context.Background(), prm) - require.NoError(t, err) - }) -} diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go deleted file mode 100644 index 82e660a7f..000000000 --- a/pkg/services/object/ape/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -package ape - -import ( - "errors" - - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -var ( - errMissingContainerID = malformedRequestError("missing container ID") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) - -func malformedRequestError(reason string) error { - invalidArgErr := &apistatus.InvalidArgument{} - invalidArgErr.SetMessage(reason) - return invalidArgErr -} - -func toStatusErr(err error) error { - var chRouterErr *checkercore.ChainRouterError - if !errors.As(err, &chRouterErr) { - errServerInternal := &apistatus.ServerInternal{} - apistatus.WriteInternalServerErr(errServerInternal, err) - return errServerInternal - } - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason("ape denied request: " + err.Error()) - return errAccessDenied -} diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go deleted file mode 100644 index 
102985aa6..000000000
--- a/pkg/services/object/ape/metadata.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package ape
-
-import (
-    "context"
-    "encoding/hex"
-    "errors"
-    "fmt"
-    "strings"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
-    objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-    cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-    sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-type Metadata struct {
-    Container          cid.ID
-    Object             *oid.ID
-    MetaHeader         *session.RequestMetaHeader
-    VerificationHeader *session.RequestVerificationHeader
-    SessionToken       *sessionSDK.Object
-    BearerToken        *bearer.Token
-}
-
-func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
-    if m.VerificationHeader == nil {
-        return nil, nil, errEmptyVerificationHeader
-    }
-
-    if m.BearerToken != nil && m.BearerToken.Impersonate() {
-        return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
-    }
-
-    // if a session token is present, use it as the source of truth
-    if m.SessionToken != nil {
-        // verify signature of session token
-        return ownerFromToken(m.SessionToken)
-    }
-
-    // otherwise get original body signature
-    bodySignature := originalBodySignature(m.VerificationHeader)
-    if bodySignature == nil {
-        return nil, nil, errEmptyBodySig
-    }
-
-    return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-// RequestInfo contains request information extracted from request metadata.
-type RequestInfo struct {
-    // Role defines under which role this request is executed.
-    // It must be one of the role constants defined in the native schema.
-    Role string
-
-    ContainerOwner user.ID
-
-    ContainerAttributes map[string]string
-
-    // Namespace defines the namespace to which the container belongs.
-    Namespace string
-
-    // HEX-encoded sender key.
-    SenderKey string
-}
-
-type RequestInfoExtractor interface {
-    GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
-}
-
-type extractor struct {
-    containers container.Source
-
-    nm netmap.Source
-
-    classifier objectCore.SenderClassifier
-}
-
-func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
-    return &extractor{
-        containers: containers,
-        nm:         nm,
-        classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
-    }
-}
-
-func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
-    currentEpoch, err := e.nm.Epoch(ctx)
-    if err != nil {
-        return errors.New("can't fetch current epoch")
-    }
-    if sessionToken.ExpiredAt(currentEpoch) {
-        return new(apistatus.SessionTokenExpired)
-    }
-    if sessionToken.InvalidAt(currentEpoch) {
-        return fmt.Errorf("malformed request: token is invalid at epoch %d", currentEpoch)
-    }
-    if !assertVerb(*sessionToken, method) {
-        return errInvalidVerb
-    }
-    return nil
-}
-
-func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
-    cnr, err := e.containers.Get(ctx, m.Container)
-    if err != nil {
-        return ri, err
-    }
-
-    if m.SessionToken != nil {
-        if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
-            return ri, err
-        }
-    }
-
-    ownerID, ownerKey, err := m.RequestOwner()
-    if err != nil {
-        return ri, err
-    }
-    res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
-    if err != nil {
-        return ri, err
-    }
-
-    ri.Role = nativeSchemaRole(res.Role)
-    ri.ContainerOwner = cnr.Value.Owner()
-
-    ri.ContainerAttributes = map[string]string{}
-    for key, val := range cnr.Value.Attributes() {
-        ri.ContainerAttributes[key] = val
-    }
-
-    cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
-    if hasNamespace {
-        ri.Namespace = cnrNamespace
-    }
-
-    // it is assumed that at the moment the key will be valid,
-    // otherwise the request would not pass validation
-    ri.SenderKey = hex.EncodeToString(res.Key)
-
-    return ri, nil
-}
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
-    var sTok *sessionSDK.Object
-
-    if tokV2 != nil {
-        sTok = new(sessionSDK.Object)
-
-        err := sTok.ReadFromV2(*tokV2)
-        if err != nil {
-            return nil, fmt.Errorf("invalid session token: %w", err)
-        }
-
-        if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
-            // if the session relates to object removal, we don't check
-            // the relation of the tombstone to the session here since the user
-            // can't predict the tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/ape/metadata_test.go deleted file mode 100644 index fd919008f..000000000 --- a/pkg/services/object/ape/metadata_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package ape - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestRequestOwner(t *testing.T) { - containerOwner, err := keys.NewPrivateKey() - require.NoError(t, err) - - userPk, err := keys.NewPrivateKey() - require.NoError(t, err) - - var userID user.ID - user.IDFromKey(&userID, userPk.PrivateKey.PublicKey) - - var userSignature refs.Signature - userSignature.SetKey(userPk.PublicKey().Bytes()) - - vh := new(sessionV2.RequestVerificationHeader) - vh.SetBodySignature(&userSignature) - - t.Run("empty verification header", func(t *testing.T) { - req := Metadata{} - checkOwner(t, req, nil, errEmptyVerificationHeader) - }) - t.Run("empty verification header signature", func(t *testing.T) { - req := Metadata{ - VerificationHeader: new(sessionV2.RequestVerificationHeader), - } - checkOwner(t, req, nil, errEmptyBodySig) - }) - t.Run("no tokens", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - } - checkOwner(t, req, userPk.PublicKey(), nil) - }) - - t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, false), - } - checkOwner(t, req, userPk.PublicKey(), nil) - }) - t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("bearer with impersonate, with session", func(t *testing.T) { - // To check that bearer token takes priority, use different key to sign session token. - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), - SessionToken: newSession(t, pk), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("with session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - SessionToken: newSession(t, containerOwner), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("malformed session token", func(t *testing.T) { - // This test is tricky: session token has issuer field and signature, which must correspond to each other. - // SDK prevents constructing such token in the first place, but it is still possible via API. - // Thus, construct v2 token, convert it to SDK one and pass to our function. 
- pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - var user1 user.ID - user.IDFromKey(&user1, pk.PrivateKey.PublicKey) - - var id refs.OwnerID - id.SetValue(user1.WalletBytes()) - - raw, err := uuid.New().MarshalBinary() - require.NoError(t, err) - - var cidV2 refs.ContainerID - cidtest.ID().WriteToV2(&cidV2) - - sessionCtx := new(sessionV2.ObjectSessionContext) - sessionCtx.SetTarget(&cidV2) - - var body sessionV2.TokenBody - body.SetOwnerID(&id) - body.SetID(raw) - body.SetLifetime(new(sessionV2.TokenLifetime)) - body.SetSessionKey(pk.PublicKey().Bytes()) - body.SetContext(sessionCtx) - - var tokV2 sessionV2.Token - tokV2.SetBody(&body) - require.NoError(t, sigutilV2.SignData(&containerOwner.PrivateKey, smWrapper{Token: &tokV2})) - require.NoError(t, sigutilV2.VerifyData(smWrapper{Token: &tokV2})) - - var tok sessionSDK.Object - require.NoError(t, tok.ReadFromV2(tokV2)) - - req := Metadata{ - VerificationHeader: vh, - SessionToken: &tok, - } - checkOwner(t, req, nil, errInvalidSessionOwner) - }) -} - -type smWrapper struct { - *sessionV2.Token -} - -func (s smWrapper) ReadSignedData(data []byte) ([]byte, error) { - return s.Token.GetBody().StableMarshal(data), nil -} - -func (s smWrapper) SignedDataSize() int { - return s.Token.GetBody().StableSize() -} - -func newSession(t *testing.T, pk *keys.PrivateKey) *sessionSDK.Object { - var tok sessionSDK.Object - require.NoError(t, tok.Sign(pk.PrivateKey)) - return &tok -} - -func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool) *bearer.Token { - var tok bearer.Token - tok.SetImpersonate(impersonate) - tok.ForUser(user) - require.NoError(t, tok.Sign(pk.PrivateKey)) - return &tok -} - -func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { - _, actual, err := req.RequestOwner() - if expectedErr != nil { - require.ErrorIs(t, err, expectedErr) - return - } - - require.NoError(t, err) - require.Equal(t, expected, actual) -} diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go deleted file mode 100644 index 39dd7f476..000000000 --- a/pkg/services/object/ape/request.go +++ /dev/null @@ -1,253 +0,0 @@ -package ape - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "net" - "strconv" - - aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/peer" -) - -var defaultRequest = aperequest.Request{} - -var errECMissingParentObjectID = errors.New("missing EC parent object ID") - -func nativeSchemaRole(role acl.Role) string { - switch role { - case acl.RoleOwner: - return nativeschema.PropertyValueContainerRoleOwner - case acl.RoleContainer: - return nativeschema.PropertyValueContainerRoleContainer - case acl.RoleInnerRing: - return 
nativeschema.PropertyValueContainerRoleIR - case acl.RoleOthers: - return nativeschema.PropertyValueContainerRoleOthers - default: - return "" - } -} - -func resourceName(cid cid.ID, oid *oid.ID, namespace string) string { - if namespace == "root" || namespace == "" { - if oid != nil { - return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, cid.EncodeToString(), oid.EncodeToString()) - } - return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString()) - } - if oid != nil { - return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, namespace, cid.EncodeToString(), oid.EncodeToString()) - } - return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) -} - -// objectProperties collects object properties from address parameters and a header if it is passed. -func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string { - objectProps := map[string]string{ - nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(), - } - - for attrName, attrValue := range cnrAttrs { - prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName) - objectProps[prop] = attrValue - } - - objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString() - - if oid != nil { - objectProps[nativeschema.PropertyKeyObjectID] = oid.String() - } - - if header == nil { - return objectProps - } - - objV2 := new(objectV2.Object) - objV2.SetHeader(header) - objSDK := objectSDK.NewFromV2(objV2) - - objectProps[nativeschema.PropertyKeyObjectVersion] = objSDK.Version().String() - objectProps[nativeschema.PropertyKeyObjectOwnerID] = objSDK.OwnerID().EncodeToString() - objectProps[nativeschema.PropertyKeyObjectCreationEpoch] = strconv.Itoa(int(objSDK.CreationEpoch())) - objectProps[nativeschema.PropertyKeyObjectPayloadLength] = strconv.Itoa(int(objSDK.PayloadSize())) - objectProps[nativeschema.PropertyKeyObjectType] = objSDK.Type().String() - - pcs, isSet := objSDK.PayloadChecksum() - if isSet { - objectProps[nativeschema.PropertyKeyObjectPayloadHash] = pcs.String() - } - hcs, isSet := objSDK.PayloadHomomorphicHash() - if isSet { - objectProps[nativeschema.PropertyKeyObjectHomomorphicHash] = hcs.String() - } - - for _, attr := range header.GetAttributes() { - objectProps[attr.GetKey()] = attr.GetValue() - } - - return objectProps -} - -// newAPERequest creates an APE request to be passed to a chain router. It collects resource properties from -// header provided by headerProvider. If it cannot be found in headerProvider, then properties are -// initialized from header given in prm (if it is set). Otherwise, just CID and OID are set to properties. 
-func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Request, error) { - switch prm.Method { - case nativeschema.MethodGetObject, - nativeschema.MethodHeadObject, - nativeschema.MethodRangeObject, - nativeschema.MethodHashObject, - nativeschema.MethodDeleteObject, - nativeschema.MethodPatchObject: - if prm.Object == nil { - return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID) - } - case nativeschema.MethodSearchObject, nativeschema.MethodPutObject: - default: - return defaultRequest, fmt.Errorf("unknown method: %s", prm.Method) - } - - var header *objectV2.Header - if prm.Header != nil { - header = prm.Header - } else if prm.Object != nil { - headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true) - if err == nil { - header = headerObjSDK.ToV2().GetHeader() - } - } - header, err := c.fillHeaderWithECParent(ctx, prm, header) - if err != nil { - return defaultRequest, fmt.Errorf("get EC parent header: %w", err) - } - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: prm.SenderKey, - nativeschema.PropertyKeyActorRole: prm.Role, - } - - for _, xhead := range prm.XHeaders { - xheadKey := fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, xhead.GetKey()) - reqProps[xheadKey] = xhead.GetValue() - } - - reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm) - if err != nil { - return defaultRequest, err - } - - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - - return aperequest.NewRequest( - prm.Method, - aperequest.NewResource( - resourceName(prm.Container, prm.Object, prm.Namespace), - objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header), - ), - reqProps, - ), nil -} - -func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) { - if header == nil { - return header, nil - } - if header.GetEC() == nil { - return header, nil - } - parentObjRefID := header.GetEC().Parent - if parentObjRefID == nil { - return nil, errECMissingParentObjectID - } - var parentObjID oid.ID - if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil { - return nil, fmt.Errorf("EC parent object ID format error: %w", err) - } - // only container node have access to collect parent object - contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container) - if err != nil { - return nil, fmt.Errorf("check container node status: %w", err) - } - if !contNode { - return header, nil - } - parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false) - if err != nil { - if isLogicalError(err) { - return header, nil - } - return nil, fmt.Errorf("EC parent header request: %w", err) - } - return parentObj.ToV2().GetHeader(), nil -} - -func isLogicalError(err error) bool { - var errObjRemoved *apistatus.ObjectAlreadyRemoved - var errObjNotFound *apistatus.ObjectNotFound - return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound) -} - -func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) { - cnr, err := c.cnrSource.Get(ctx, cnrID) - if err != nil { - return false, err - } - - nm, err := netmap.GetLatestNetworkMap(ctx, c.nm) - if err != nil { - return false, err - } - idCnr := make([]byte, sha256.Size) - cnrID.Encode(idCnr) - - in, err := object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value) - if err != 
nil {
-        return false, err
-    } else if in {
-        return true, nil
-    }
-
-    nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
-    if err != nil {
-        return false, err
-    }
-
-    return object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value)
-}
-
-// fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
-func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
-    if reqProps == nil {
-        reqProps = make(map[string]string)
-    }
-    pk, err := keys.NewPublicKeyFromString(prm.SenderKey)
-    if err != nil {
-        return nil, err
-    }
-    props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
-    if err != nil {
-        return reqProps, err
-    }
-    for propertyName, propertyValue := range props {
-        reqProps[propertyName] = propertyValue
-    }
-    return reqProps, nil
-}
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
deleted file mode 100644
index fcf7c4c40..000000000
--- a/pkg/services/object/ape/request_test.go
+++ /dev/null
@@ -1,373 +0,0 @@
-package ape
-
-import (
-    "context"
-    "fmt"
-    "net"
-    "testing"
-
-    aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
-    cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
-    objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
-    checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
-    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-    usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
-    commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
-    nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-    "github.com/stretchr/testify/require"
-    "google.golang.org/grpc/peer"
-)
-
-const (
-    testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
-
-    incomingIP = "192.92.33.1"
-
-    testSysAttrName = "unittest"
-
-    testSysAttrZone = "eggplant"
-)
-
-var containerAttrs = map[string]string{
-    cnrV2.SysAttributeName: testSysAttrName,
-    cnrV2.SysAttributeZone: testSysAttrZone,
-}
-
-func ctxWithPeerInfo() context.Context {
-    return peer.NewContext(context.Background(), &peer.Peer{
-        Addr: &net.TCPAddr{
-            IP:   net.ParseIP(incomingIP),
-            Port: 41111,
-        },
-    })
-}
-
-func TestObjectProperties(t *testing.T) {
-    for _, test := range []struct {
-        name      string
-        container string
-        object    *string
-        header    *headerObjectSDKParams
-    }{
-        {
-            name:      "fully filled header",
-            container: containerID,
-            object:    stringPtr(objectID),
-            header: &headerObjectSDKParams{
-                majorVersion:           1,
-                minorVersion:           1,
-                owner:                  usertest.ID(),
-                epoch:                  3,
-                payloadSize:            1000,
-                typ:                    objectSDK.TypeRegular,
-                payloadChecksum:        checksumtest.Checksum(),
-                payloadHomomorphicHash: checksumtest.Checksum(),
-                attributes: []struct {
-                    key string
-                    val string
-                }{
-                    {
-                        key: "attr1",
-                        val: "val1",
-                    },
-                    {
-                        key: "attr2",
-                        val: "val2",
-                    },
-                },
-            },
-        },
-        {
-            name:      "partially filled header",
-            container: containerID,
-            header: &headerObjectSDKParams{
-                majorVersion: 1,
-                minorVersion: 1,
-                owner:        usertest.ID(),
-                epoch:        3,
-                attributes: []struct {
-                    key string
-                    val string
-                }{
-                    {
-                        key: "attr1",
-                        val: "val1",
-                    },
-                },
-            },
-        },
-        {
-            name:      "only address parameters set in header",
-            container: containerID,
-            object:    stringPtr(objectID),
-        },
-        {
-            name:      "only container set in header",
-            container: containerID,
-        },
-    } {
-        t.Run(test.name,
func(t *testing.T) { - cnr := newContainerIDSDK(t, test.container) - obj := newObjectIDSDK(t, test.object) - header := newHeaderObjectSDK(cnr, obj, test.header) - - var testCnrOwner user.ID - require.NoError(t, testCnrOwner.DecodeString(testOwnerID)) - - props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader()) - require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID]) - require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID]) - - if obj != nil { - require.Equal(t, *test.object, props[nativeschema.PropertyKeyObjectID]) - } - - if test.header != nil { - require.Equal(t, - fmt.Sprintf("v%d.%d", test.header.majorVersion, test.header.minorVersion), - props[nativeschema.PropertyKeyObjectVersion], - ) - require.Equal(t, test.header.owner.EncodeToString(), props[nativeschema.PropertyKeyObjectOwnerID]) - require.Equal(t, fmt.Sprintf("%d", test.header.epoch), props[nativeschema.PropertyKeyObjectCreationEpoch]) - require.Equal(t, fmt.Sprintf("%d", test.header.payloadSize), props[nativeschema.PropertyKeyObjectPayloadLength]) - require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType]) - require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash]) - require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash]) - require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)]) - require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)]) - - for _, attr := range test.header.attributes { - require.Equal(t, attr.val, props[attr.key]) - } - } - }) - } -} - -func TestNewAPERequest(t *testing.T) { - tests := []struct { - name string - methods []string - namespace string - container string - object *string - header testHeader - expectErr error - }{ - { - name: "oid required requests", - methods: methodsRequiredOID, - namespace: namespace, - container: containerID, - object: stringPtr(objectID), - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - majorVersion: 1, - minorVersion: 1, - owner: usertest.ID(), - epoch: 3, - payloadSize: 1000, - typ: objectSDK.TypeRegular, - payloadChecksum: checksumtest.Checksum(), - payloadHomomorphicHash: checksumtest.Checksum(), - }, - fromHeaderProvider: true, - }, - }, - { - name: "oid required requests but header cannot be found locally", - methods: methodsRequiredOID, - namespace: namespace, - container: containerID, - object: stringPtr(objectID), - header: testHeader{}, - }, - { - name: "oid required requests missed oid", - methods: methodsRequiredOID, - namespace: namespace, - container: containerID, - object: nil, - header: testHeader{}, - expectErr: errMissingOID, - }, - { - name: "response for oid required requests", - methods: methodsRequiredOID, - namespace: namespace, - container: containerID, - object: stringPtr(objectID), - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - majorVersion: 1, - minorVersion: 1, - owner: usertest.ID(), - epoch: 3, - payloadSize: 1000, - typ: objectSDK.TypeRegular, - payloadChecksum: checksumtest.Checksum(), - payloadHomomorphicHash: checksumtest.Checksum(), - }, - fromRequestResponseHeader: true, - }, - }, - { - name: "oid not required methods request", - methods: methodsOptionalOID, - namespace: namespace, - container: 
containerID, - object: nil, - header: testHeader{ - headerObjSDK: &headerObjectSDKParams{ - majorVersion: 6, - minorVersion: 66, - owner: usertest.ID(), - epoch: 3, - typ: objectSDK.TypeLock, - }, - fromRequestResponseHeader: true, - }, - }, - { - name: "oid not required methods request but no header", - methods: methodsOptionalOID, - namespace: namespace, - container: containerID, - object: nil, - header: testHeader{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - for _, method := range test.methods { - t.Run(method, func(t *testing.T) { - cnr := newContainerIDSDK(t, test.container) - obj := newObjectIDSDK(t, test.object) - - var testCnrOwner user.ID - require.NoError(t, testCnrOwner.DecodeString(testOwnerID)) - - prm := Prm{ - Namespace: test.namespace, - Method: method, - Container: cnr, - Object: obj, - Role: role, - SenderKey: senderKey, - ContainerOwner: testCnrOwner, - ContainerAttributes: map[string]string{ - cnrV2.SysAttributeZone: testSysAttrZone, - cnrV2.SysAttributeName: testSysAttrName, - }, - } - - headerSource := newHeaderProviderMock() - ffidProvider := newFrostfsIDProviderMock(t) - - var headerObjSDK *objectSDK.Object - if test.header.headerObjSDK != nil { - headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK) - if test.header.fromHeaderProvider { - require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider") - headerSource.addHeader(cnr, *obj, headerObjSDK) - } else if test.header.fromRequestResponseHeader { - prm.Header = headerObjSDK.ToV2().GetHeader() - } - } - - c := checkerImpl{ - headerProvider: headerSource, - frostFSIDClient: ffidProvider, - } - - r, err := c.newAPERequest(ctxWithPeerInfo(), prm) - if test.expectErr != nil { - require.Error(t, err) - require.ErrorIs(t, err, test.expectErr) - return - } - - expectedRequest := aperequest.NewRequest( - method, - aperequest.NewResource( - resourceName(cnr, obj, prm.Namespace), - objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header { - if headerObjSDK != nil { - return headerObjSDK.ToV2().GetHeader() - } - return prm.Header - }())), - map[string]string{ - nativeschema.PropertyKeyActorPublicKey: prm.SenderKey, - nativeschema.PropertyKeyActorRole: prm.Role, - fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr1"): "value1", - fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr2"): "value2", - commonschema.PropertyKeyFrostFSIDGroupID: "1", - commonschema.PropertyKeyFrostFSSourceIP: incomingIP, - }, - ) - - require.Equal(t, expectedRequest, r) - }) - } - }) - } -} - -func TestResourceName(t *testing.T) { - for _, test := range []struct { - name string - namespace string - container string - object *string - expected string - }{ - { - name: "non-root namespace, CID", - namespace: namespace, - container: containerID, - expected: fmt.Sprintf("native:object/%s/%s/*", namespace, containerID), - }, - { - name: "non-root namespace, CID, OID", - namespace: namespace, - container: containerID, - object: stringPtr(objectID), - expected: fmt.Sprintf("native:object/%s/%s/%s", namespace, containerID, objectID), - }, - { - name: "empty namespace, CID", - namespace: "", - container: containerID, - expected: fmt.Sprintf("native:object//%s/*", containerID), - }, - { - name: "empty namespace, CID, OID", - namespace: "", - container: containerID, - object: stringPtr(objectID), - expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID), - }, - { - name: "root namespace, 
CID", - namespace: "root", - container: containerID, - expected: fmt.Sprintf("native:object//%s/*", containerID), - }, - { - name: "root namespace, CID, OID", - namespace: "root", - container: containerID, - object: stringPtr(objectID), - expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID), - }, - } { - t.Run(test.name, func(t *testing.T) { - cnr := newContainerIDSDK(t, test.container) - obj := newObjectIDSDK(t, test.object) - require.Equal(t, test.expected, resourceName(cnr, obj, test.namespace)) - }) - } -} diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go deleted file mode 100644 index 5e04843f3..000000000 --- a/pkg/services/object/ape/service.go +++ /dev/null @@ -1,480 +0,0 @@ -package ape - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" -) - -type Service struct { - apeChecker Checker - - extractor RequestInfoExtractor - - next objectSvc.ServiceServer -} - -var _ objectSvc.ServiceServer = (*Service)(nil) - -type HeaderProvider interface { - GetHeader(ctx context.Context, cnr cid.ID, oid oid.ID, local bool) (*objectSDK.Object, error) -} - -type storageEngineHeaderProvider struct { - storageEngine *engine.StorageEngine - getSvc *getsvc.Service -} - -func (p storageEngineHeaderProvider) GetHeader(ctx context.Context, cnr cid.ID, objID oid.ID, local bool) (*objectSDK.Object, error) { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(objID) - if local { - return engine.Head(ctx, p.storageEngine, addr) - } - w := getsvc.NewSimpleObjectWriter() - var headPrm getsvc.HeadPrm - headPrm.WithAddress(addr) - headPrm.SetHeaderWriter(w) - headPrm.SetCommonParameters(&util.CommonPrm{}) // default values are ok - if err := p.getSvc.Head(ctx, headPrm); err != nil { - return nil, err - } - return w.Object(), nil -} - -func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) HeaderProvider { - return storageEngineHeaderProvider{ - storageEngine: e, - getSvc: s, - } -} - -func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { - return &Service{ - apeChecker: apeChecker, - extractor: extractor, - next: next, - } -} - -type getStreamBasicChecker struct { - objectSvc.GetObjectStream - - apeChecker Checker - - metadata Metadata - - reqInfo RequestInfo -} - -func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { - if partInit, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) - if err != nil { - return toStatusErr(err) - } - - prm := Prm{ - Namespace: g.reqInfo.Namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodGetObject, - SenderKey: 
g.reqInfo.SenderKey, - ContainerOwner: g.reqInfo.ContainerOwner, - ContainerAttributes: g.reqInfo.ContainerAttributes, - Role: g.reqInfo.Role, - BearerToken: g.metadata.BearerToken, - XHeaders: resp.GetMetaHeader().GetXHeaders(), - } - - if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil { - return toStatusErr(err) - } - } - return g.GetObjectStream.Send(resp) -} - -func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) - if err != nil { - return err - } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) - if err != nil { - return err - } - return c.next.Get(request, &getStreamBasicChecker{ - GetObjectStream: stream, - apeChecker: c.apeChecker, - metadata: md, - reqInfo: reqInfo, - }) -} - -type putStreamBasicChecker struct { - apeChecker Checker - - extractor RequestInfoExtractor - - next objectSvc.PutObjectStream -} - -func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) - if err != nil { - return err - } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) - if err != nil { - return err - } - - prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: partInit.GetHeader(), - Method: nativeschema.MethodPutObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - } - - if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { - return toStatusErr(err) - } - } - - return p.next.Send(ctx, request) -} - -func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { - streamer, err := c.next.Put(ctx) - - return &putStreamBasicChecker{ - apeChecker: c.apeChecker, - extractor: c.extractor, - next: streamer, - }, err -} - -type patchStreamBasicChecker struct { - apeChecker Checker - - extractor RequestInfoExtractor - - next objectSvc.PatchObjectStream - - nonFirstSend bool -} - -func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - if !p.nonFirstSend { - p.nonFirstSend = true - - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) - if err != nil { - return err - } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) - if err != nil { - return err - } - - prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodPatchObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - } - - if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { - return toStatusErr(err) - } - } - - return p.next.Send(ctx, request) -} - -func (p patchStreamBasicChecker) CloseAndRecv(ctx 
context.Context) (*objectV2.PatchResponse, error) {
-    return p.next.CloseAndRecv(ctx)
-}
-
-func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
-    streamer, err := c.next.Patch(ctx)
-
-    return &patchStreamBasicChecker{
-        apeChecker: c.apeChecker,
-        extractor:  c.extractor,
-        next:       streamer,
-    }, err
-}
-
-func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
-    md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
-    if err != nil {
-        return nil, err
-    }
-    reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
-    if err != nil {
-        return nil, err
-    }
-
-    resp, err := c.next.Head(ctx, request)
-    if err != nil {
-        return nil, err
-    }
-
-    header := new(objectV2.Header)
-    switch headerPart := resp.GetBody().GetHeaderPart().(type) {
-    case *objectV2.ShortHeader:
-        cidV2 := new(refs.ContainerID)
-        md.Container.WriteToV2(cidV2)
-        header.SetContainerID(cidV2)
-        header.SetVersion(headerPart.GetVersion())
-        header.SetCreationEpoch(headerPart.GetCreationEpoch())
-        header.SetOwnerID(headerPart.GetOwnerID())
-        header.SetObjectType(headerPart.GetObjectType())
-        header.SetHomomorphicHash(headerPart.GetHomomorphicHash())
-        header.SetPayloadLength(headerPart.GetPayloadLength())
-        header.SetPayloadHash(headerPart.GetPayloadHash())
-    case *objectV2.HeaderWithSignature:
-        header = headerPart.GetHeader()
-    default:
-        return resp, nil
-    }
-
-    err = c.apeChecker.CheckAPE(ctx, Prm{
-        Namespace:           reqInfo.Namespace,
-        Container:           md.Container,
-        Object:              md.Object,
-        Header:              header,
-        Method:              nativeschema.MethodHeadObject,
-        Role:                reqInfo.Role,
-        SenderKey:           reqInfo.SenderKey,
-        ContainerOwner:      reqInfo.ContainerOwner,
-        ContainerAttributes: reqInfo.ContainerAttributes,
-        BearerToken:         md.BearerToken,
-        XHeaders:            md.MetaHeader.GetXHeaders(),
-    })
-    if err != nil {
-        return nil, toStatusErr(err)
-    }
-    return resp, nil
-}
-
-func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
-    md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
-    if err != nil {
-        return err
-    }
-    reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
-    if err != nil {
-        return err
-    }
-
-    err = c.apeChecker.CheckAPE(stream.Context(), Prm{
-        Namespace:           reqInfo.Namespace,
-        Container:           md.Container,
-        Method:              nativeschema.MethodSearchObject,
-        Role:                reqInfo.Role,
-        SenderKey:           reqInfo.SenderKey,
-        ContainerOwner:      reqInfo.ContainerOwner,
-        ContainerAttributes: reqInfo.ContainerAttributes,
-        BearerToken:         md.BearerToken,
-        XHeaders:            md.MetaHeader.GetXHeaders(),
-    })
-    if err != nil {
-        return toStatusErr(err)
-    }
-
-    return c.next.Search(request, stream)
-}
-
-func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
-    md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
-    if err != nil {
-        return nil, err
-    }
-    reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
-    if err != nil {
-        return nil, err
-    }
-
-    err = c.apeChecker.CheckAPE(ctx, Prm{
-        Namespace:           reqInfo.Namespace,
-        Container:           md.Container,
-        Object:              md.Object,
-        Method:              nativeschema.MethodDeleteObject,
-        Role:                reqInfo.Role,
-        SenderKey:           reqInfo.SenderKey,
-        ContainerOwner:      reqInfo.ContainerOwner,
-        ContainerAttributes: reqInfo.ContainerAttributes,
-
BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - }) - if err != nil { - return nil, toStatusErr(err) - } - - resp, err := c.next.Delete(ctx, request) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) - if err != nil { - return err - } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) - if err != nil { - return err - } - - err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodRangeObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - }) - if err != nil { - return toStatusErr(err) - } - - return c.next.GetRange(request, stream) -} - -func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) - if err != nil { - return nil, err - } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) - if err != nil { - return nil, err - } - - prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodHashObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - } - - resp, err := c.next.GetRangeHash(ctx, request) - if err != nil { - return nil, err - } - - if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { - return nil, toStatusErr(err) - } - return resp, nil -} - -func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) - if err != nil { - return nil, err - } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) - if err != nil { - return nil, err - } - - prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: request.GetBody().GetObject().GetHeader(), - Method: nativeschema.MethodPutObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), - } - - if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { - return nil, toStatusErr(err) - } - - return c.next.PutSingle(ctx, request) -} - -type request interface { - GetMetaHeader() *session.RequestMetaHeader - GetVerificationHeader() *session.RequestVerificationHeader -} - -func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(cnrV2, 
objV2) - if err != nil { - return - } - session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) - if err != nil { - return - } - bearer, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return - } - - md = Metadata{ - Container: cnrID, - Object: objID, - VerificationHeader: request.GetVerificationHeader(), - SessionToken: session, - BearerToken: bearer, - } - return -} diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go deleted file mode 100644 index 97dbfa658..000000000 --- a/pkg/services/object/ape/types.go +++ /dev/null @@ -1,17 +0,0 @@ -package ape - -import "context" - -// Checker provides methods to check requests and responses -// with access policy engine. -type Checker interface { - CheckAPE(context.Context, Prm) error -} - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. - InnerRingKeys(ctx context.Context) ([][]byte, error) -} diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/ape/util.go deleted file mode 100644 index 5cd2caa50..000000000 --- a/pkg/services/object/ape/util.go +++ /dev/null @@ -1,169 +0,0 @@ -package ape - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return - } - } else { - err = errMissingContainerID - return - } - - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } - } - return -} - -// originalBearerToken goes down to original request meta header and fetches -// bearer token from there. -func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetBearerToken() - if tokV2 == nil { - return nil, nil - } - - var tok bearer.Token - return &tok, tok.ReadFromV2(*tokV2) -} - -func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { - // 1. First check signature of session token. - if !token.VerifySignature() { - return nil, nil, errInvalidSessionSig - } - - // 2. 
Then check if session token owner issued the session token - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var tokV2 sessionV2.Token - token.WriteToV2(&tokV2) - - tokenIssuerKey, err := unmarshalPublicKey(tokV2.GetSignature().GetKey()) - if err != nil { - return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err) - } - - tokenIssuer := token.Issuer() - - if !isOwnerFromKey(tokenIssuer, tokenIssuerKey) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return nil, nil, errInvalidSessionOwner - } - - return &tokenIssuer, tokenIssuerKey, nil -} - -func originalBodySignature(v *sessionV2.RequestVerificationHeader) *refsV2.Signature { - if v == nil { - return nil - } - - for v.GetOrigin() != nil { - v = v.GetOrigin() - } - - return v.GetBodySignature() -} - -func unmarshalPublicKey(bs []byte) (*keys.PublicKey, error) { - return keys.NewPublicKeyFromBytes(bs, elliptic.P256()) -} - -func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { - if key == nil { - return false - } - - var id2 user.ID - user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) - - return id2.Equals(id) -} - -// assertVerb checks that token verb corresponds to the method. -func assertVerb(tok sessionSDK.Object, method string) bool { - switch method { - case nativeschema.MethodPutObject: - return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case nativeschema.MethodDeleteObject: - return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case nativeschema.MethodGetObject: - return tok.AssertVerb(sessionSDK.VerbObjectGet) - case nativeschema.MethodHeadObject: - return tok.AssertVerb( - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectPatch, - ) - case nativeschema.MethodSearchObject: - return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case nativeschema.MethodRangeObject: - return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case nativeschema.MethodHashObject: - return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) - case nativeschema.MethodPatchObject: - return tok.AssertVerb(sessionSDK.VerbObjectPatch) - } - return false -} - -// assertSessionRelation checks if given token describing the FrostFS session -// relates to the given container and optional object. Missing object -// means that the context isn't bound to any FrostFS object in the container. -// Returns no error iff relation is correct. Criteria: -// -// session is bound to the given container -// object is not specified or session is bound to this object -// -// Session MUST be bound to the particular container, otherwise behavior is undefined. 
-func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error { - if !tok.AssertContainer(cnr) { - return errors.New("requested container is not related to the session") - } - - if obj != nil && !tok.AssertObject(*obj) { - return errors.New("requested object is not related to the session") - } - - return nil -} - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go deleted file mode 100644 index 916bce427..000000000 --- a/pkg/services/object/ape/util_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package ape - -import ( - "slices" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/stretchr/testify/require" -) - -func TestIsVerbCompatible(t *testing.T) { - table := map[string][]sessionSDK.ObjectVerb{ - nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, - nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, - nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, - nativeschema.MethodHeadObject: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectPatch, - }, - nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, - nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, - nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - sessionSDK.VerbObjectPatch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - contains := slices.Contains(list, verb) - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - 
require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go deleted file mode 100644 index f8ee089fe..000000000 --- a/pkg/services/object/audit.go +++ /dev/null @@ -1,233 +0,0 @@ -package object - -import ( - "context" - "errors" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -var _ ServiceServer = (*auditService)(nil) - -type auditService struct { - next ServiceServer - log *logger.Logger - enabled *atomic.Bool -} - -func NewAuditService(next ServiceServer, log *logger.Logger, enabled *atomic.Bool) ServiceServer { - return &auditService{ - next: next, - log: log, - enabled: enabled, - } -} - -// Delete implements ServiceServer. -func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { - res, err := a.next.Delete(ctx, req) - if !a.enabled.Load() { - return res, err - } - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) - return res, err -} - -// Get implements ServiceServer. -func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error { - err := a.next.Get(req, stream) - if !a.enabled.Load() { - return err - } - audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) - return err -} - -// GetRange implements ServiceServer. -func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error { - err := a.next.GetRange(req, stream) - if !a.enabled.Load() { - return err - } - audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) - return err -} - -// GetRangeHash implements ServiceServer. -func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - resp, err := a.next.GetRangeHash(ctx, req) - if !a.enabled.Load() { - return resp, err - } - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) - return resp, err -} - -// Head implements ServiceServer. -func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { - resp, err := a.next.Head(ctx, req) - if !a.enabled.Load() { - return resp, err - } - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) - return resp, err -} - -// Put implements ServiceServer. 
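-// On success the returned stream is wrapped with auditPutStream, which captures -// the container/object IDs and signing key from the first request and logs the -// final result in CloseAndRecv.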
-func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) { - res, err := a.next.Put(ctx) - if !a.enabled.Load() { - return res, err - } - if err != nil { - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false) - return res, err - } - return &auditPutStream{ - stream: res, - log: a.log, - }, nil -} - -// PutSingle implements ServiceServer. -func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { - resp, err := a.next.PutSingle(ctx, req) - if !a.enabled.Load() { - return resp, err - } - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req, - audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(), - req.GetBody().GetObject().GetObjectID()), - err == nil) - return resp, err -} - -// Search implements ServiceServer. -func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) error { - err := a.next.Search(req, stream) - if !a.enabled.Load() { - return err - } - audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req, - audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) - return err -} - -var _ PutObjectStream = (*auditPutStream)(nil) - -type auditPutStream struct { - stream PutObjectStream - log *logger.Logger - - failed bool - key []byte - containerID *refs.ContainerID - objectID *refs.ObjectID -} - -// CloseAndRecv implements PutObjectStream. -func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { - resp, err := a.stream.CloseAndRecv(ctx) - if err != nil { - a.failed = true - } - a.objectID = resp.GetBody().GetObjectID() - audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, - audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), - !a.failed) - return resp, err -} - -// Send implements PutObjectStream. -func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error { - if partInit, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartInit); ok { - a.containerID = partInit.GetHeader().GetContainerID() - a.objectID = partInit.GetObjectID() - a.key = req.GetVerificationHeader().GetBodySignature().GetKey() - } - - err := a.stream.Send(ctx, req) - if err != nil { - a.failed = true - } - if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here - audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, - audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), - !a.failed) - } - return err -} - -type auditPatchStream struct { - stream PatchObjectStream - log *logger.Logger - - failed bool - key []byte - containerID *refs.ContainerID - objectID *refs.ObjectID - - nonFirstSend bool -} - -func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) { - res, err := a.next.Patch(ctx) - if !a.enabled.Load() { - return res, err - } - if err != nil { - audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false) - return res, err - } - return &auditPatchStream{ - stream: res, - log: a.log, - }, nil -} - -// CloseAndRecv implements PatchObjectStream. 
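-// It logs the audit record accumulated over the whole patch stream once the stream is closed.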
-func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { - resp, err := a.stream.CloseAndRecv(ctx) - if err != nil { - a.failed = true - } - a.objectID = resp.GetBody().GetObjectID() - audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, - audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), - !a.failed) - return resp, err -} - -// Send implements PatchObjectStream. -func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error { - if !a.nonFirstSend { - a.containerID = req.GetBody().GetAddress().GetContainerID() - a.objectID = req.GetBody().GetAddress().GetObjectID() - a.key = req.GetVerificationHeader().GetBodySignature().GetKey() - a.nonFirstSend = true - } - - err := a.stream.Send(ctx, req) - if err != nil { - a.failed = true - } - if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here - audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, - audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), - !a.failed) - } - return err -} diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go deleted file mode 100644 index ef65e78bc..000000000 --- a/pkg/services/object/common.go +++ /dev/null @@ -1,105 +0,0 @@ -package object - -import ( - "context" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -// NodeState is storage node state processed by Object service. -type NodeState interface { - // IsMaintenance checks if node is under maintenance. Node MUST NOT serve - // local object operations. Node MUST respond with apistatus.NodeUnderMaintenance - // error if IsMaintenance returns true. - IsMaintenance() bool -} - -// Common is an Object API ServiceServer which encapsulates logic spread to all -// object operations. -// -// If underlying NodeState.IsMaintenance returns true, all operations are -// immediately failed with apistatus.NodeUnderMaintenance. -type Common struct { - state NodeState - - nextHandler ServiceServer -} - -// Init initializes the Common instance. 
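-// state is consulted before every operation; nextHandler receives the call only -// when the node is not under maintenance.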
-func (x *Common) Init(state NodeState, nextHandler ServiceServer) { - x.state = state - x.nextHandler = nextHandler -} - -func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error { - if x.state.IsMaintenance() { - return new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Get(req, stream) -} - -func (x *Common) Put(ctx context.Context) (PutObjectStream, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Put(ctx) -} - -func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Patch(ctx) -} - -func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Head(ctx, req) -} - -func (x *Common) Search(req *objectV2.SearchRequest, stream SearchStream) error { - if x.state.IsMaintenance() { - return new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Search(req, stream) -} - -func (x *Common) Delete(ctx context.Context, req *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.Delete(ctx, req) -} - -func (x *Common) GetRange(req *objectV2.GetRangeRequest, stream GetObjectRangeStream) error { - if x.state.IsMaintenance() { - return new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.GetRange(req, stream) -} - -func (x *Common) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.GetRangeHash(ctx, req) -} - -func (x *Common) PutSingle(ctx context.Context, req *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - if x.state.IsMaintenance() { - return nil, new(apistatus.NodeUnderMaintenance) - } - - return x.nextHandler.PutSingle(ctx, req) -} diff --git a/pkg/services/object/common/target/builder.go b/pkg/services/object/common/target/builder.go deleted file mode 100644 index ea68365a7..000000000 --- a/pkg/services/object/common/target/builder.go +++ /dev/null @@ -1,54 +0,0 @@ -package target - -import ( - "context" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" -) - -var _ transformer.ChunkedObjectWriter = (*inMemoryObjectBuilder)(nil) - -type inMemoryObjectBuilder struct { - objectWriter transformer.ObjectWriter - payload *payload - - obj *objectSDK.Object -} - -func newInMemoryObjectBuilder(objectWriter transformer.ObjectWriter) *inMemoryObjectBuilder { - return &inMemoryObjectBuilder{ - objectWriter: objectWriter, - payload: getPayload(), - } -} - -func (b *inMemoryObjectBuilder) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { - defer func() { - putPayload(b.payload) - b.payload = nil - }() - - b.obj.SetPayload(b.payload.Data) - - if err := b.objectWriter.WriteObject(ctx, b.obj); err != nil { - return nil, err - } - - id, _ := b.obj.ID() - return &transformer.AccessIdentifiers{ - SelfID: id, - }, nil -} - -func (b *inMemoryObjectBuilder) Write(_ context.Context, p []byte) (int, error) { - b.payload.Data = append(b.payload.Data, p...) 
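- // the pooled buffer only grows here; Close hands it to the object and returns it to the pool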
- - return len(p), nil -} - -func (b *inMemoryObjectBuilder) WriteHeader(_ context.Context, obj *objectSDK.Object) error { - b.obj = obj - - return nil -} diff --git a/pkg/services/object/common/target/pool.go b/pkg/services/object/common/target/pool.go deleted file mode 100644 index 71da305ad..000000000 --- a/pkg/services/object/common/target/pool.go +++ /dev/null @@ -1,30 +0,0 @@ -package target - -import ( - "sync" -) - -const ( - defaultAllocSize = 1024 - poolSliceMaxSize = 128 * 1024 -) - -type payload struct { - Data []byte -} - -var putBytesPool = &sync.Pool{ - New: func() any { return &payload{Data: make([]byte, 0, defaultAllocSize)} }, -} - -func getPayload() *payload { - return putBytesPool.Get().(*payload) -} - -func putPayload(p *payload) { - if cap(p.Data) > poolSliceMaxSize { - return - } - p.Data = p.Data[:0] - putBytesPool.Put(p) -} diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go deleted file mode 100644 index f2bd907db..000000000 --- a/pkg/services/object/common/target/target.go +++ /dev/null @@ -1,168 +0,0 @@ -package target - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - // prepare needed put parameters - if err := preparePrm(ctx, &prm); err != nil { - return nil, fmt.Errorf("could not prepare put parameters: %w", err) - } - - if prm.Header.Signature() != nil { - return newUntrustedTarget(ctx, &prm) - } - return newTrustedTarget(ctx, &prm) -} - -func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) - if maxPayloadSz == 0 { - return nil, errors.New("could not obtain max object size parameter") - } - - if prm.SignRequestPrivateKey == nil { - nodeKey, err := prm.Config.KeyStorage.GetKey(nil) - if err != nil { - return nil, err - } - prm.SignRequestPrivateKey = nodeKey - } - - // prepare untrusted-Put object target - return &validatingPreparedTarget{ - nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)), - fmt: prm.Config.FormatValidator, - - maxPayloadSz: maxPayloadSz, - }, nil -} - -func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - prm.Relay = nil // do not relay request without signature - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) - if maxPayloadSz == 0 { - return nil, errors.New("could not obtain max object size parameter") - } - - sToken := prm.Common.SessionToken() - - // prepare trusted-Put object target - - // get private token from local storage - var sessionInfo *util.SessionInfo - - if sToken != nil { - sessionInfo = &util.SessionInfo{ - ID: sToken.ID(), - Owner: sToken.Issuer(), - } - } - - key, err := prm.Config.KeyStorage.GetKey(sessionInfo) - if err != nil { - return nil, fmt.Errorf("could not receive session key: %w", err) - } - - // In case session token is missing, the line above returns the default 
key. - // If it isn't owner key, replication attempts will fail, thus this check. - ownerObj := prm.Header.OwnerID() - if ownerObj.IsEmpty() { - return nil, errors.New("missing object owner") - } - - if sToken == nil { - var ownerSession user.ID - user.IDFromKey(&ownerSession, key.PublicKey) - - if !ownerObj.Equals(ownerSession) { - return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) - } - } else if !ownerObj.Equals(sessionInfo.Owner) { - return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) - } - - if prm.SignRequestPrivateKey == nil { - prm.SignRequestPrivateKey = key - } - - return &validatingTarget{ - fmt: prm.Config.FormatValidator, - nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: key, - NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) }, - NetworkState: prm.Config.NetworkState, - MaxSize: maxPayloadSz, - WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container), - SessionToken: sToken, - }), - }, nil -} - -func preparePrm(ctx context.Context, prm *objectwriter.Params) error { - var err error - - // get latest network map - nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource) - if err != nil { - return fmt.Errorf("could not get latest network map: %w", err) - } - - idCnr, ok := prm.Header.ContainerID() - if !ok { - return errors.New("missing container ID") - } - - // get container to store the object - cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr) - if err != nil { - return fmt.Errorf("could not get container by ID: %w", err) - } - - prm.Container = cnrInfo.Value - - // add common options - prm.TraverseOpts = append(prm.TraverseOpts, - // set processing container - placement.ForContainer(prm.Container), - ) - - if ech := prm.Header.ECHeader(); ech != nil { - prm.TraverseOpts = append(prm.TraverseOpts, - // set identifier of the processing object - placement.ForObject(ech.Parent()), - ) - } else if id, ok := prm.Header.ID(); ok { - prm.TraverseOpts = append(prm.TraverseOpts, - // set identifier of the processing object - placement.ForObject(id), - ) - } - - // create placement builder from network map - builder := placement.NewNetworkMapBuilder(nm) - - if prm.Common.LocalOnly() { - // restrict success count to 1 stored copy (to local storage) - prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1)) - - // use local-only placement builder - builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys) - } - - // set placement builder - prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder)) - - return nil -} diff --git a/pkg/services/object/common/target/validation.go b/pkg/services/object/common/target/validation.go deleted file mode 100644 index b29721d01..000000000 --- a/pkg/services/object/common/target/validation.go +++ /dev/null @@ -1,145 +0,0 @@ -package target - -import ( - "bytes" - "context" - "crypto/sha256" - "errors" - "fmt" - "hash" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "git.frostfs.info/TrueCloudLab/tzhash/tz" -) - -// validatingTarget validates unprepared object format and content (streaming PUT case). 
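-// Payload size and checksum checks are not performed here: for streamed objects -// they happen further down the pipeline once the object is formed.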
-type validatingTarget struct { - nextTarget transformer.ChunkedObjectWriter - - fmt *object.FormatValidator -} - -// validatingPreparedTarget validates prepared object format and content. -type validatingPreparedTarget struct { - nextTarget transformer.ChunkedObjectWriter - - fmt *object.FormatValidator - - hash hash.Hash - - checksum []byte - - maxPayloadSz uint64 // network config - - payloadSz uint64 // payload size of the streaming object from header - - writtenPayload uint64 // number of already written payload bytes -} - -var ( - // ErrExceedingMaxSize is returned when payload size is greater than the limit. - ErrExceedingMaxSize = errors.New("payload size is greater than the limit") - // ErrWrongPayloadSize is returned when chunk payload size is greater than the length declared in header. - ErrWrongPayloadSize = errors.New("wrong payload size") -) - -func (t *validatingTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error { - if err := t.fmt.Validate(ctx, obj, true); err != nil { - return fmt.Errorf("(%T) could not validate object format: %w", t, err) - } - - return t.nextTarget.WriteHeader(ctx, obj) -} - -func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) { - return t.nextTarget.Write(ctx, p) -} - -func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { - return t.nextTarget.Close(ctx) -} - -func (t *validatingPreparedTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error { - t.payloadSz = obj.PayloadSize() - chunkLn := uint64(len(obj.Payload())) - - // check chunk size - if chunkLn > t.payloadSz { - return ErrWrongPayloadSize - } - - // check payload size limit - if t.payloadSz > t.maxPayloadSz { - return ErrExceedingMaxSize - } - - cs, csSet := obj.PayloadChecksum() - if !csSet { - return errors.New("missing payload checksum") - } - - switch typ := cs.Type(); typ { - default: - return fmt.Errorf("(%T) unsupported payload checksum type %v", t, typ) - case checksum.SHA256: - t.hash = sha256.New() - case checksum.TZ: - t.hash = tz.New() - } - - t.checksum = cs.Value() - - if err := t.fmt.Validate(ctx, obj, false); err != nil { - return fmt.Errorf("(%T) could not validate object format: %w", t, err) - } - - err := t.nextTarget.WriteHeader(ctx, obj) - if err != nil { - return err - } - - // update written bytes - // - // Note: we MUST NOT add obj.PayloadSize() since obj - // can carry only the chunk of the full payload - t.writtenPayload += chunkLn - - return nil -} - -func (t *validatingPreparedTarget) Write(ctx context.Context, p []byte) (n int, err error) { - chunkLn := uint64(len(p)) - - // check if new chunk will overflow payload size - if t.writtenPayload+chunkLn > t.payloadSz { - return 0, ErrWrongPayloadSize - } - - _, err = t.hash.Write(p) - if err != nil { - return - } - - n, err = t.nextTarget.Write(ctx, p) - if err == nil { - t.writtenPayload += uint64(n) - } - - return -} - -func (t *validatingPreparedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { - // check payload size correctness - if t.payloadSz != t.writtenPayload { - return nil, ErrWrongPayloadSize - } - - if !bytes.Equal(t.hash.Sum(nil), t.checksum) { - return nil, fmt.Errorf("(%T) incorrect payload checksum", t) - } - - return t.nextTarget.Close(ctx) -} diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go deleted file mode 100644 index 6593d3ca0..000000000 --- a/pkg/services/object/common/writer/common.go +++ /dev/null @@ -1,114 +0,0 
@@ -package writer - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -type NodeIterator struct { - Traversal - cfg *Config -} - -func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { - return &NodeIterator{ - Traversal: Traversal{ - Opts: opts, - Exclude: make(map[string]*bool), - }, - cfg: c, - } -} - -func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(ctx, n.Opts...) - if err != nil { - return fmt.Errorf("could not create object placement traverser: %w", err) - } - - resErr := &atomic.Value{} - - // Must iterate over all replicas, regardless of whether there are identical nodes there. - // At the same time need to exclude identical nodes from processing. - for { - addrs := traverser.Next() - if len(addrs) == 0 { - break - } - - if n.forEachAddress(ctx, traverser, addrs, f, resErr) { - break - } - } - - if !traverser.Success() { - var err errIncompletePut - err.singleErr, _ = resErr.Load().(error) - return err - } - - // perform additional container broadcast if needed - if n.submitPrimaryPlacementFinish() { - err := n.ForEachNode(ctx, f) - if err != nil { - n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) - // we don't fail primary operation because of broadcast failure - } - } - - return nil -} - -func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool { - var wg sync.WaitGroup - - for _, addr := range addrs { - if ok := n.Exclude[string(addr.PublicKey())]; ok != nil { - if *ok { - traverser.SubmitSuccess() - } - // This can happen only during additional container broadcast. - continue - } - - isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey()) - - item := new(bool) - wg.Add(1) - go func() { - defer wg.Done() - - err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) - if err != nil { - resErr.Store(err) - svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err) - return - } - - traverser.SubmitSuccess() - *item = true - }() - - // Mark the container node as processed in order to exclude it - // in subsequent container broadcast. Note that we don't - // process this node during broadcast if primary placement - // on it failed. 
- n.submitProcessed(addr, item) - } - - wg.Wait() - - return false -} - -func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool { - return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock)) -} diff --git a/pkg/services/object/common/writer/dispatcher.go b/pkg/services/object/common/writer/dispatcher.go deleted file mode 100644 index bb9a54ce9..000000000 --- a/pkg/services/object/common/writer/dispatcher.go +++ /dev/null @@ -1,23 +0,0 @@ -package writer - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" -) - -var _ transformer.ObjectWriter = (*objectWriterDispatcher)(nil) - -type objectWriterDispatcher struct { - ecWriter transformer.ObjectWriter - repWriter transformer.ObjectWriter -} - -func (m *objectWriterDispatcher) WriteObject(ctx context.Context, obj *objectSDK.Object) error { - if object.IsECSupported(obj) { - return m.ecWriter.WriteObject(ctx, obj) - } - return m.repWriter.WriteObject(ctx, obj) -} diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go deleted file mode 100644 index fff58aca7..000000000 --- a/pkg/services/object/common/writer/distributed.go +++ /dev/null @@ -1,135 +0,0 @@ -package writer - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -type preparedObjectTarget interface { - WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error -} - -type distributedWriter struct { - cfg *Config - - placementOpts []placement.Option - - obj *objectSDK.Object - objMeta object.ContentMeta - - nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget - - relay func(context.Context, NodeDescriptor) error - - resetSuccessAfterOnBroadcast bool -} - -// Traversal parameters and state of container. -type Traversal struct { - Opts []placement.Option - - // whether an additional broadcast is needed after the object is saved - ExtraBroadcastEnabled bool - - // container nodes which were processed during the primary object placement - Exclude map[string]*bool - - ResetSuccessAfterOnBroadcast bool -} - -// updates traversal parameters after the primary placement finishes and -// returns true if additional container broadcast is needed. -func (x *Traversal) submitPrimaryPlacementFinish() bool { - if x.ExtraBroadcastEnabled { - // do not track success during container broadcast (best-effort) - x.Opts = append(x.Opts, placement.WithoutSuccessTracking()) - - if x.ResetSuccessAfterOnBroadcast { - x.Opts = append(x.Opts, placement.ResetSuccessAfter()) - } - - // avoid 2nd broadcast - x.ExtraBroadcastEnabled = false - - return true - } - - return false -} - -// marks the container node as processed during the primary object placement. -func (x *Traversal) submitProcessed(n placement.Node, item *bool) { - if x.ExtraBroadcastEnabled { - key := string(n.PublicKey()) - - if x.Exclude == nil { - x.Exclude = make(map[string]*bool, 1) - } - - x.Exclude[key] = item - } -} - -type NodeDescriptor struct { - Local bool - - Info placement.Node -} - -// errIncompletePut is returned if processing on a container fails.
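-// The last error received from a node, if any, is kept and exposed via Unwrap.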
-type errIncompletePut struct { - singleErr error // error from the last responding node -} - -func (x errIncompletePut) Error() string { - const commonMsg = "incomplete object PUT by placement" - - if x.singleErr != nil { - return fmt.Sprintf("%s: %v", commonMsg, x.singleErr) - } - - return commonMsg -} - -func (x errIncompletePut) Unwrap() error { - return x.singleErr -} - -// WriteObject implements the transformer.ObjectWriter interface. -func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { - t.obj = obj - - var err error - - if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil { - return fmt.Errorf("(%T) could not validate payload content: %w", t, err) - } - return t.iteratePlacement(ctx) -} - -func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error { - if !node.Local && t.relay != nil { - return t.relay(ctx, node) - } - - target := t.nodeTargetInitializer(node) - - err := target.WriteObject(ctx, t.obj, t.objMeta) - if err != nil { - return fmt.Errorf("could not write header: %w", err) - } - return nil -} - -func (t *distributedWriter) iteratePlacement(ctx context.Context) error { - id, _ := t.obj.ID() - - iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id))) - iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */) - iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast - return iter.ForEachNode(ctx, t.sendObject) -} diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go deleted file mode 100644 index 26a53e315..000000000 --- a/pkg/services/object/common/writer/ec.go +++ /dev/null @@ -1,355 +0,0 @@ -package writer - -import ( - "context" - "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -var _ transformer.ObjectWriter = (*ECWriter)(nil) - -var ( - errUnsupportedECObject = errors.New("object is not supported for erasure coding") - errFailedToSaveAllECParts = errors.New("failed to save all EC parts") -) - -type ECWriter struct { - Config *Config - PlacementOpts []placement.Option - Container containerSDK.Container - Key *ecdsa.PrivateKey - CommonPrm *svcutil.CommonPrm - Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error - - ObjectMeta object.ContentMeta - ObjectMetaValid bool - - remoteRequestSignKey *ecdsa.PrivateKey -} - -func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { - relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj) - if err != nil { - return err - } - if relayed { - return nil - } - - if 
!object.IsECSupported(obj) { - // must be resolved by caller - return errUnsupportedECObject - } - - if !e.ObjectMetaValid { - if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil { - return fmt.Errorf("(%T) could not validate payload content: %w", e, err) - } - e.ObjectMetaValid = true - } - - if isContainerNode { - restoreTokens := e.CommonPrm.ForgetTokens() - defer restoreTokens() - // As the request is executed on a container node, sign it with the container key. - e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil) - if err != nil { - return err - } - } else { - e.remoteRequestSignKey = e.Key - } - - if obj.ECHeader() != nil { - return e.writeECPart(ctx, obj) - } - return e.writeRawObject(ctx, obj) -} - -func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { - currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx) - if err != nil { - return false, false, err - } - if currentNodeIsContainerNode { - // object can be split or saved locally - return false, true, nil - } - if e.Relay == nil { - return false, currentNodeIsContainerNode, nil - } - objID := object.AddressOf(obj).Object() - var index uint32 - if obj.ECHeader() != nil { - objID = obj.ECHeader().Parent() - index = obj.ECHeader().Index() - } - if err := e.relayToContainerNode(ctx, objID, index); err != nil { - return false, false, err - } - return true, currentNodeIsContainerNode, nil -} - -func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) { - t, err := placement.NewTraverser(ctx, e.PlacementOpts...) - if err != nil { - return false, err - } - for { - nodes := t.Next() - if len(nodes) == 0 { - break - } - for _, node := range nodes { - if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) { - return true, nil - } - } - } - return false, nil -} - -func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error { - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) - if err != nil { - return err - } - var lastErr error - offset := int(index) - for { - nodes := t.Next() - if len(nodes) == 0 { - break - } - for idx := range nodes { - node := nodes[(idx+offset)%len(nodes)] - var info client.NodeInfo - client.NodeInfoFromNetmapElement(&info, node) - - c, err := e.Config.ClientConstructor.Get(info) - if err != nil { - return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) - } - - err = e.Relay(ctx, info, c) - if err == nil { - return nil - } - e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup())) - lastErr = err - } - } - if lastErr == nil { - return nil - } - return errIncompletePut{ - singleErr: lastErr, - } -} - -func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error { - if e.CommonPrm.LocalOnly() { - return e.writePartLocal(ctx, obj) - } - - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
- if err != nil { - return err - } - - eg, egCtx := errgroup.WithContext(ctx) - for { - nodes := t.Next() - if len(nodes) == 0 { - break - } - - eg.Go(func() error { - return e.writePart(egCtx, obj, int(obj.ECHeader().Index()), nodes, make([]atomic.Bool, len(nodes))) - }) - t.SubmitSuccess() - } - if err := eg.Wait(); err != nil { - return errIncompletePut{ - singleErr: err, - } - } - return nil -} - -func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error { - // now only single EC policy is supported - c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy())) - if err != nil { - return err - } - parts, err := c.Split(obj, e.Key) - if err != nil { - return err - } - partsProcessed := make([]atomic.Bool, len(parts)) - objID, _ := obj.ID() - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) - if err != nil { - return err - } - - for { - eg, egCtx := errgroup.WithContext(ctx) - nodes := t.Next() - if len(nodes) == 0 { - break - } - - visited := make([]atomic.Bool, len(nodes)) - for idx := range parts { - visited[idx%len(nodes)].Store(true) - } - - for idx := range parts { - if !partsProcessed[idx].Load() { - eg.Go(func() error { - err := e.writePart(egCtx, parts[idx], idx, nodes, visited) - if err == nil { - partsProcessed[idx].Store(true) - t.SubmitSuccess() - } - return err - }) - } - } - err = eg.Wait() - } - if err != nil { - return errIncompletePut{ - singleErr: err, - } - } - for idx := range partsProcessed { - if !partsProcessed[idx].Load() { - return errIncompletePut{ - singleErr: errFailedToSaveAllECParts, - } - } - } - return nil -} - -func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // try to save to node for current part index - node := nodes[partIdx%len(nodes)] - err := e.putECPartToNode(ctx, obj, node) - if err == nil { - return nil - } else if clientSDK.IsErrObjectAlreadyRemoved(err) { - return err - } - e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), - zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), - zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) - - partVisited := make([]bool, len(nodes)) - partVisited[partIdx%len(nodes)] = true - - // try to save to any node not visited by any of other parts - for i := 1; i < len(nodes); i++ { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - idx := (partIdx + i) % len(nodes) - if !visited[idx].CompareAndSwap(false, true) { - continue - } - node = nodes[idx] - err := e.putECPartToNode(ctx, obj, node) - if err == nil { - return nil - } - e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), - zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), - zap.String("node", hex.EncodeToString(node.PublicKey())), - zap.Error(err)) - - partVisited[idx] = true - } - - // try to save to any node not visited by current part - for i := range nodes { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if partVisited[i] { - continue - } - node = nodes[i] - err := e.putECPartToNode(ctx, obj, node) - if err == nil { - return nil - } - e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, 
zap.Stringer("part_address", object.AddressOf(obj)), - zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), - zap.String("node", hex.EncodeToString(node.PublicKey())), - zap.Error(err)) - } - - return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj)) -} - -func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { - if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) { - return e.writePartLocal(ctx, obj) - } - return e.writePartRemote(ctx, obj, node) -} - -func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { - localTarget := LocalTarget{ - Storage: e.Config.LocalStore, - Container: e.Container, - } - return localTarget.WriteObject(ctx, obj, e.ObjectMeta) -} - -func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { - var clientNodeInfo client.NodeInfo - client.NodeInfoFromNetmapElement(&clientNodeInfo, node) - - remoteTaget := remoteWriter{ - privateKey: e.remoteRequestSignKey, - clientConstructor: e.Config.ClientConstructor, - commonPrm: e.CommonPrm, - nodeInfo: clientNodeInfo, - } - - return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) -} diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go deleted file mode 100644 index d5eeddf21..000000000 --- a/pkg/services/object/common/writer/ec_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package writer - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha256" - "errors" - "fmt" - "slices" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "git.frostfs.info/TrueCloudLab/tzhash/tz" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type testPlacementBuilder struct { - vectors [][]netmap.NodeInfo -} - -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( - [][]netmap.NodeInfo, error, -) { - arr := slices.Clone(p.vectors[0]) - return [][]netmap.NodeInfo{arr}, nil -} - -type nmKeys struct{} - -func (nmKeys) IsLocalKey(_ []byte) bool { - return false -} - -type clientConstructor struct { - vectors [][]netmap.NodeInfo -} - -func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) { - if bytes.Equal(info.PublicKey(), 
c.vectors[0][0].PublicKey()) || - bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) { - return multiAddressClient{err: errors.New("node unavailable")}, nil - } - return multiAddressClient{}, nil -} - -type multiAddressClient struct { - client.MultiAddressClient - err error -} - -func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) { - if c.err != nil { - return nil, c.err - } - return &apiclient.ResObjectPutSingle{}, nil -} - -func (c multiAddressClient) ReportError(error) { -} - -func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error { - return nil -} - -func TestECWriter(t *testing.T) { - // Create container with policy EC 1.1 - cnr := container.Container{} - p1 := netmap.PlacementPolicy{} - p1.SetContainerBackupFactor(1) - x1 := netmap.ReplicaDescriptor{} - x1.SetECDataCount(1) - x1.SetECParityCount(1) - p1.AddReplicas(x1) - cnr.SetPlacementPolicy(p1) - cnr.SetAttribute("cnr", "cnr1") - - cid := cidtest.ID() - - // Create 4 nodes, 2 nodes for chunks, - // 2 nodes for the case when the first two will fail. - ns, _ := testNodeMatrix(t, []int{4}) - - data := make([]byte, 100) - _, _ = rand.Read(data) - ver := version.Current() - - var csum checksum.Checksum - csum.SetSHA256(sha256.Sum256(data)) - - var csumTZ checksum.Checksum - csumTZ.SetTillichZemor(tz.Sum(csum.Value())) - - obj := objectSDK.New() - obj.SetID(oidtest.ID()) - obj.SetOwnerID(usertest.ID()) - obj.SetContainerID(cid) - obj.SetVersion(&ver) - obj.SetPayload(data) - obj.SetPayloadSize(uint64(len(data))) - obj.SetPayloadChecksum(csum) - obj.SetPayloadHomomorphicHash(csumTZ) - - // Builder return nodes without sort by hrw - builder := &testPlacementBuilder{ - vectors: ns, - } - - ownerKey, err := keys.NewPrivateKey() - require.NoError(t, err) - nodeKey, err := keys.NewPrivateKey() - require.NoError(t, err) - - log, err := logger.NewLogger(logger.Prm{}) - require.NoError(t, err) - - var n nmKeys - ecw := ECWriter{ - Config: &Config{ - NetmapKeys: n, - Logger: log, - ClientConstructor: clientConstructor{vectors: ns}, - KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), - }, - PlacementOpts: append( - []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)}, - placement.WithCopyNumbers(nil)), // copies number ignored for EC - Container: cnr, - Key: &ownerKey.PrivateKey, - Relay: nil, - ObjectMetaValid: true, - } - - err = ecw.WriteObject(context.Background(), obj) - require.NoError(t, err) -} - -func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { - mNodes := make([][]netmap.NodeInfo, len(dim)) - mAddr := make([][]string, len(dim)) - - for i := range dim { - ns := make([]netmap.NodeInfo, dim[i]) - as := make([]string, dim[i]) - - for j := range dim[i] { - a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", - strconv.Itoa(i), - strconv.Itoa(60000+j), - ) - - var ni netmap.NodeInfo - ni.SetNetworkEndpoints(a) - ni.SetPublicKey([]byte(a)) - - var na network.AddressGroup - - err := na.FromIterator(netmapcore.Node(ni)) - require.NoError(t, err) - - as[j] = network.StringifyGroup(na) - - ns[j] = ni - } - - mNodes[i] = ns - mAddr[i] = as - } - - return mNodes, mAddr -} diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go deleted file mode 100644 index cf3d03275..000000000 --- a/pkg/services/object/common/writer/local.go +++ /dev/null @@ -1,55 +0,0 @@ -package writer - -import ( - "context" 
- "fmt" - - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// ObjectStorage is an object storage interface. -type ObjectStorage interface { - // Put must save passed object - // and return any appeared error. - Put(context.Context, *objectSDK.Object, bool) error - // Delete must delete passed objects - // and return any appeared error. - Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error - // Lock must lock passed objects - // and return any appeared error. - Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error - // IsLocked must clarify object's lock status. - IsLocked(context.Context, oid.Address) (bool, error) -} - -type LocalTarget struct { - Storage ObjectStorage - Container containerSDK.Container -} - -func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error { - if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil { - return fmt.Errorf("(%T) could not put object to local storage: %w", t, err) - } - - switch meta.Type() { - case objectSDK.TypeTombstone: - err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects()) - if err != nil { - return fmt.Errorf("could not delete objects from tombstone locally: %w", err) - } - case objectSDK.TypeLock: - err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects()) - if err != nil { - return fmt.Errorf("could not lock object from lock objects locally: %w", err) - } - default: - // objects that do not change meta storage - } - - return nil -} diff --git a/pkg/services/object/common/writer/remote.go b/pkg/services/object/common/writer/remote.go deleted file mode 100644 index 697613ff7..000000000 --- a/pkg/services/object/common/writer/remote.go +++ /dev/null @@ -1,131 +0,0 @@ -package writer - -import ( - "context" - "crypto/ecdsa" - "fmt" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type remoteWriter struct { - privateKey *ecdsa.PrivateKey - - commonPrm *util.CommonPrm - - nodeInfo clientcore.NodeInfo - - clientConstructor ClientConstructor -} - -// RemoteSender represents utility for -// sending an object to a remote host. -type RemoteSender struct { - keyStorage *util.KeyStorage - - clientConstructor ClientConstructor -} - -// RemotePutPrm groups remote put operation parameters. 
-type RemotePutPrm struct { - node netmap.NodeInfo - - obj *objectSDK.Object -} - -func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error { - c, err := t.clientConstructor.Get(t.nodeInfo) - if err != nil { - return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err) - } - - var prm internalclient.PutObjectPrm - - prm.SetClient(c) - prm.SetPrivateKey(t.privateKey) - prm.SetSessionToken(t.commonPrm.SessionToken()) - prm.SetBearerToken(t.commonPrm.BearerToken()) - prm.SetXHeaders(t.commonPrm.XHeaders()) - prm.SetObject(obj) - - err = t.putSingle(ctx, prm) - if status.Code(err) != codes.Unimplemented { - return err - } - - return t.putStream(ctx, prm) -} - -func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error { - _, err := internalclient.PutObject(ctx, prm) - if err != nil { - return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err) - } - return nil -} - -func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error { - _, err := internalclient.PutObjectSingle(ctx, prm) - if err != nil { - return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err) - } - return nil -} - -// NewRemoteSender creates, initializes and returns new RemoteSender instance. -func NewRemoteSender(keyStorage *util.KeyStorage, cons ClientConstructor) *RemoteSender { - return &RemoteSender{ - keyStorage: keyStorage, - clientConstructor: cons, - } -} - -// WithNodeInfo sets information about the remote node. -func (p *RemotePutPrm) WithNodeInfo(v netmap.NodeInfo) *RemotePutPrm { - if p != nil { - p.node = v - } - - return p -} - -// WithObject sets transferred object. -func (p *RemotePutPrm) WithObject(v *objectSDK.Object) *RemotePutPrm { - if p != nil { - p.obj = v - } - - return p -} - -// PutObject sends object to remote node. 
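-// The request is signed with the local node key; the underlying write tries -// PutSingle first and falls back to streaming PUT if the remote reports it as unimplemented.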
-func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error { - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return err - } - - t := &remoteWriter{ - privateKey: key, - clientConstructor: s.clientConstructor, - } - - err = clientcore.NodeInfoFromRawNetmapElement(&t.nodeInfo, netmapCore.Node(p.node)) - if err != nil { - return fmt.Errorf("parse client node info: %w", err) - } - - if err := t.WriteObject(ctx, p.obj, objectcore.ContentMeta{}); err != nil { - return fmt.Errorf("(%T) could not send object: %w", s, err) - } - - return nil -} diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go deleted file mode 100644 index d3d2b41b4..000000000 --- a/pkg/services/object/common/writer/writer.go +++ /dev/null @@ -1,168 +0,0 @@ -package writer - -import ( - "context" - "crypto/ecdsa" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" -) - -type MaxSizeSource interface { - // MaxObjectSize returns maximum payload size - // of physically stored object in system. - // - // Must return 0 if value can not be obtained. - MaxObjectSize(context.Context) uint64 -} - -type ClientConstructor interface { - Get(client.NodeInfo) (client.MultiAddressClient, error) -} - -type InnerRing interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) -} - -type FormatValidatorConfig interface { - VerifySessionTokenIssuer() bool -} - -// Config represents a set of static parameters that are established during -// the initialization phase of all services. 
-type Config struct { - KeyStorage *objutil.KeyStorage - - MaxSizeSrc MaxSizeSource - - LocalStore ObjectStorage - - ContainerSource container.Source - - NetmapSource netmap.Source - - NetmapKeys netmap.AnnouncedKeys - - FormatValidator *object.FormatValidator - - NetworkState netmap.State - - ClientConstructor ClientConstructor - - Logger *logger.Logger - - VerifySessionTokenIssuer bool -} - -type Option func(*Config) - -func WithLogger(l *logger.Logger) Option { - return func(c *Config) { - c.Logger = l - } -} - -func WithVerifySessionTokenIssuer(v bool) Option { - return func(c *Config) { - c.VerifySessionTokenIssuer = v - } -} - -type Params struct { - Config *Config - - Common *objutil.CommonPrm - - Header *objectSDK.Object - - Container containerSDK.Container - - TraverseOpts []placement.Option - - Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error - - SignRequestPrivateKey *ecdsa.PrivateKey -} - -func New(prm *Params) transformer.ObjectWriter { - if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) { - return newECWriter(prm) - } - return newDefaultObjectWriter(prm, false) -} - -func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter { - var relay func(context.Context, NodeDescriptor) error - if prm.Relay != nil { - relay = func(ctx context.Context, node NodeDescriptor) error { - var info client.NodeInfo - - client.NodeInfoFromNetmapElement(&info, node.Info) - - c, err := prm.Config.ClientConstructor.Get(info) - if err != nil { - return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) - } - - return prm.Relay(ctx, info, c) - } - } - - var resetSuccessAfterOnBroadcast bool - traverseOpts := prm.TraverseOpts - if forECPlacement && !prm.Common.LocalOnly() { - // save non-regular and linking objects to an EC container. - // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc. - traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1))) - resetSuccessAfterOnBroadcast = true - } - - return &distributedWriter{ - cfg: prm.Config, - placementOpts: traverseOpts, - resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast, - nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget { - if node.Local { - return LocalTarget{ - Storage: prm.Config.LocalStore, - Container: prm.Container, - } - } - - rt := &remoteWriter{ - privateKey: prm.SignRequestPrivateKey, - commonPrm: prm.Common, - clientConstructor: prm.Config.ClientConstructor, - } - - client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info) - - return rt - }, - relay: relay, - } -} - -func newECWriter(prm *Params) transformer.ObjectWriter { - return &objectWriterDispatcher{ - ecWriter: &ECWriter{ - Config: prm.Config, - PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC - Container: prm.Container, - Key: prm.SignRequestPrivateKey, - CommonPrm: prm.Common, - Relay: prm.Relay, - }, - repWriter: newDefaultObjectWriter(prm, true), - } -} diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go deleted file mode 100644 index 57e33fde7..000000000 --- a/pkg/services/object/delete/delete.go +++ /dev/null @@ -1,45 +0,0 @@ -package deletesvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "go.uber.org/zap" -) - -// Delete serves requests to remove the objects.
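-// A tombstone covering the object and all of its collected split members is formed -// and stored in the container.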
-func (s *Service) Delete(ctx context.Context, prm Prm) error { - // If session token is not found we will fail during tombstone PUT. - // Here we fail immediately to ensure no unnecessary network communication is done. - if tok := prm.common.SessionToken(); tok != nil { - _, err := s.keyStorage.GetKey(&util.SessionInfo{ - ID: tok.ID(), - Owner: tok.Issuer(), - }) - if err != nil { - return err - } - } - - exec := &execCtx{ - svc: s, - prm: prm, - } - - exec.setLogger(s.log) - - return exec.execute(ctx) -} - -func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(ctx, logs.ServingRequest) - - if err := exec.executeLocal(ctx); err != nil { - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) - return err - } - - exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) - return nil -} diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go deleted file mode 100644 index a99ba3586..000000000 --- a/pkg/services/object/delete/exec.go +++ /dev/null @@ -1,233 +0,0 @@ -package deletesvc - -import ( - "context" - "errors" - "fmt" - "slices" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -var errDeleteECChunk = errors.New("invalid operation: delete EC object chunk") - -type execCtx struct { - svc *Service - - prm Prm - - log *logger.Logger - - tombstone *objectSDK.Tombstone - - splitInfo *objectSDK.SplitInfo - - tombstoneObj *objectSDK.Object -} - -func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = l.With( - zap.String("request", "DELETE"), - zap.Stringer("address", exec.address()), - zap.Bool("local", exec.isLocal()), - zap.Bool("with session", exec.prm.common.SessionToken() != nil), - zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - ) -} - -func (exec *execCtx) isLocal() bool { - return exec.prm.common.LocalOnly() -} - -func (exec *execCtx) address() oid.Address { - return exec.prm.addr -} - -func (exec *execCtx) containerID() cid.ID { - return exec.prm.addr.Container() -} - -func (exec *execCtx) commonParameters() *util.CommonPrm { - return exec.prm.common -} - -func (exec *execCtx) newAddress(id oid.ID) oid.Address { - var a oid.Address - a.SetObject(id) - a.SetContainer(exec.containerID()) - - return a -} - -func (exec *execCtx) formExtendedInfo(ctx context.Context) error { - obj, err := exec.svc.header.head(ctx, exec) - - var errSplitInfo *objectSDK.SplitInfoError - var errECInfo *objectSDK.ECInfoError - - switch { - case err == nil: - if ech := obj.ECHeader(); ech != nil { - return errDeleteECChunk - } - return nil - case errors.As(err, &errSplitInfo): - exec.splitInfo = errSplitInfo.SplitInfo() - exec.tombstone.SetSplitID(exec.splitInfo.SplitID()) - - exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) - - if err := exec.collectMembers(ctx); err != nil { - return err - } - - exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected) - return nil - case errors.As(err, &errECInfo): - exec.log.Debug(ctx, logs.DeleteECObjectReceived) - return nil - } - - if 
!apiclient.IsErrObjectAlreadyRemoved(err) { - // IsErrObjectAlreadyRemoved check is required because splitInfo - // implicitly performs Head request that may return ObjectAlreadyRemoved - // status that is not specified for Delete. - return err - } - - return nil -} - -func (exec *execCtx) collectMembers(ctx context.Context) error { - if exec.splitInfo == nil { - exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY) - return nil - } - - var err error - if _, withLink := exec.splitInfo.Link(); withLink { - err = exec.collectChildren(ctx) - } - - if err != nil { - if _, withLast := exec.splitInfo.LastPart(); withLast { - if err := exec.collectChain(ctx); err != nil { - return err - } - } - } // may fail if neither the right nor the linking ID is set? - - return exec.supplementBySplitID(ctx) -} - -func (exec *execCtx) collectChain(ctx context.Context) error { - var chain []oid.ID - - exec.log.Debug(ctx, logs.DeleteAssemblingChain) - - for prev, withPrev := exec.splitInfo.LastPart(); withPrev; { - chain = append(chain, prev) - - p, err := exec.svc.header.previous(ctx, exec, prev) - if err != nil { - return fmt.Errorf("get previous split element for %s: %w", prev, err) - } - - withPrev = p != nil - if withPrev { - prev = *p - } - } - - exec.addMembers(chain) - return nil -} - -func (exec *execCtx) collectChildren(ctx context.Context) error { - exec.log.Debug(ctx, logs.DeleteCollectingChildren) - - children, err := exec.svc.header.children(ctx, exec) - if err != nil { - return fmt.Errorf("collect children: %w", err) - } - - link, _ := exec.splitInfo.Link() - exec.addMembers(append(children, link)) - return nil -} - -func (exec *execCtx) supplementBySplitID(ctx context.Context) error { - exec.log.Debug(ctx, logs.DeleteSupplementBySplitID) - - chain, err := exec.svc.searcher.splitMembers(ctx, exec) - if err != nil { - return fmt.Errorf("search split chain members: %w", err) - } - - exec.addMembers(chain) - return nil -} - -func (exec *execCtx) addMembers(incoming []oid.ID) { - members := exec.tombstone.Members() - - for i := range members { - for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body - if members[i].Equals(incoming[j]) { - incoming = slices.Delete(incoming, j, j+1) - j-- - } - } - } - - exec.tombstone.SetMembers(append(members, incoming...)) -} - -func (exec *execCtx) initTombstoneObject() error { - payload, err := exec.tombstone.Marshal() - if err != nil { - return fmt.Errorf("marshal tombstone: %w", err) - } - - exec.tombstoneObj = objectSDK.New() - exec.tombstoneObj.SetContainerID(exec.containerID()) - exec.tombstoneObj.SetType(objectSDK.TypeTombstone) - exec.tombstoneObj.SetPayload(payload) - - tokenSession := exec.commonParameters().SessionToken() - if tokenSession != nil { - issuer := tokenSession.Issuer() - exec.tombstoneObj.SetOwnerID(issuer) - } else { - // make local node a tombstone object owner - localUser := exec.svc.netInfo.LocalNodeID() - exec.tombstoneObj.SetOwnerID(localUser) - } - - var a objectSDK.Attribute - a.SetKey(objectV2.SysAttributeExpEpoch) - a.SetValue(strconv.FormatUint(exec.tombstone.ExpirationEpoch(), 10)) - - exec.tombstoneObj.SetAttributes(a) - - return nil -} - -func (exec *execCtx) saveTombstone(ctx context.Context) error { - id, err := exec.svc.placer.put(ctx, exec) - if err != nil { - return fmt.Errorf("save tombstone: %w", err) - } - - exec.prm.tombAddrWriter.SetAddress(exec.newAddress(*id)) - return nil -} diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go deleted file mode 100644 index 
01b2d9b3f..000000000 --- a/pkg/services/object/delete/local.go +++ /dev/null @@ -1,43 +0,0 @@ -package deletesvc - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (exec *execCtx) executeLocal(ctx context.Context) error { - exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure) - - if err := exec.formTombstone(ctx); err != nil { - return err - } - - exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving) - - return exec.saveTombstone(ctx) -} - -func (exec *execCtx) formTombstone(ctx context.Context) error { - tsLifetime, err := exec.svc.netInfo.TombstoneLifetime() - if err != nil { - return fmt.Errorf("fetch tombstone lifetime: %w", err) - } - - exec.tombstone = objectSDK.NewTombstone() - exec.tombstone.SetExpirationEpoch( - exec.svc.netInfo.CurrentEpoch() + tsLifetime, - ) - exec.addMembers([]oid.ID{exec.address().Object()}) - - exec.log.Debug(ctx, logs.DeleteFormingSplitInfo) - - if err := exec.formExtendedInfo(ctx); err != nil { - return fmt.Errorf("form extended info: %w", err) - } - - return exec.initTombstoneObject() -} diff --git a/pkg/services/object/delete/prm.go b/pkg/services/object/delete/prm.go deleted file mode 100644 index 92eb5dcb6..000000000 --- a/pkg/services/object/delete/prm.go +++ /dev/null @@ -1,35 +0,0 @@ -package deletesvc - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// TombstoneAddressWriter is an interface of tombstone address setter. -type TombstoneAddressWriter interface { - SetAddress(address oid.Address) -} - -// Prm groups parameters of Delete service call. -type Prm struct { - common *util.CommonPrm - - addr oid.Address - - tombAddrWriter TombstoneAddressWriter -} - -// SetCommonParameters sets common parameters of the operation. -func (p *Prm) SetCommonParameters(common *util.CommonPrm) { - p.common = common -} - -// WithAddress sets address of the object to be removed. -func (p *Prm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// WithTombstoneAddressTarget sets tombstone address destination. -func (p *Prm) WithTombstoneAddressTarget(w TombstoneAddressWriter) { - p.tombAddrWriter = w -} diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go deleted file mode 100644 index 1c4d7d585..000000000 --- a/pkg/services/object/delete/service.go +++ /dev/null @@ -1,97 +0,0 @@ -package deletesvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" -) - -// Service is a utility serving requests of the Object.Delete service. -type Service struct { - *cfg -} - -// Option is a Service's constructor option. -type Option func(*cfg) - -// NetworkInfo wraps network state and configurations.
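The prm.go setters above are the entire caller-facing surface of the Delete service. A minimal wiring sketch, assuming the usual imports (context, deletesvc, util, oid) and an already-built address and common parameters; addrCapture is a hypothetical TombstoneAddressWriter:

// addrCapture remembers the tombstone address reported by the service.
type addrCapture struct{ addr oid.Address }

func (w *addrCapture) SetAddress(a oid.Address) { w.addr = a }

func deleteObject(ctx context.Context, svc *deletesvc.Service, addr oid.Address, common *util.CommonPrm) (oid.Address, error) {
	var p deletesvc.Prm
	p.SetCommonParameters(common)
	p.WithAddress(addr)

	w := new(addrCapture)
	p.WithTombstoneAddressTarget(w) // receives the tombstone address on success

	if err := svc.Delete(ctx, p); err != nil {
		return oid.Address{}, err
	}
	return w.addr, nil
}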
-type NetworkInfo interface { - netmap.State - - // TombstoneLifetime must return the lifespan of the tombstones - // in the FrostFS epochs. - TombstoneLifetime() (uint64, error) - - // LocalNodeID returns user ID of the local storage node. Result must not be nil. - // New tombstone objects will have the result as an owner ID if removal is executed w/o a session. - LocalNodeID() user.ID -} - -type cfg struct { - log *logger.Logger - - header interface { - // must return (nil, nil) for PHY objects - head(context.Context, *execCtx) (*objectSDK.Object, error) - - children(context.Context, *execCtx) ([]oid.ID, error) - - // must return (nil, nil) for 1st object in chain - previous(context.Context, *execCtx, oid.ID) (*oid.ID, error) - } - - searcher interface { - splitMembers(context.Context, *execCtx) ([]oid.ID, error) - } - - placer interface { - put(context.Context, *execCtx) (*oid.ID, error) - } - - netInfo NetworkInfo - - keyStorage *util.KeyStorage -} - -// New creates, initializes and returns a utility serving -// Object.Delete service requests. -func New(gs *getsvc.Service, - ss *searchsvc.Service, - ps *putsvc.Service, - ni NetworkInfo, - ks *util.KeyStorage, - opts ...Option, -) *Service { - c := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - header: &headSvcWrapper{s: gs}, - searcher: &searchSvcWrapper{s: ss}, - placer: &putSvcWrapper{s: ps}, - netInfo: ni, - keyStorage: ks, - } - - for i := range opts { - opts[i](c) - } - - return &Service{ - cfg: c, - } -} - -// WithLogger returns option to specify Delete service's logger. -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go deleted file mode 100644 index a78fd7747..000000000 --- a/pkg/services/object/delete/util.go +++ /dev/null @@ -1,140 +0,0 @@ -package deletesvc - -import ( - "context" - - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type headSvcWrapper struct { - s *getsvc.Service -} - -type searchSvcWrapper struct { - s *searchsvc.Service -} - -type putSvcWrapper struct { - s *putsvc.Service -} - -type simpleIDWriter struct { - ids []oid.ID -} - -func (w *headSvcWrapper) headAddress(ctx context.Context, exec *execCtx, addr oid.Address) (*objectSDK.Object, error) { - wr := getsvc.NewSimpleObjectWriter() - - p := getsvc.HeadPrm{} - - if cp := exec.commonParameters(); cp != nil { - commonParameters := *cp - p.SetCommonParameters(&commonParameters) - } - - p.SetHeaderWriter(wr) - p.WithRawFlag(true) - p.WithAddress(addr) - - err := w.s.Head(ctx, p) - if err != nil { - return nil, err - } - - return wr.Object(), nil -} - -func (w *headSvcWrapper) head(ctx context.Context, exec *execCtx) (*objectSDK.Object, error) { - return w.headAddress(ctx, exec, exec.address()) -} - -func (w *headSvcWrapper) children(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - link, _ := exec.splitInfo.Link() - - a := exec.newAddress(link) - - linking, err := w.headAddress(ctx, exec, a) - if err != nil { - return nil, err - } - - return linking.Children(), nil -} - -func (w *headSvcWrapper) previous(ctx context.Context, exec *execCtx, id oid.ID) (*oid.ID, error) { - a := exec.newAddress(id) - - h, err := 
w.headAddress(ctx, exec, a) - if err != nil { - return nil, err - } - - prev, ok := h.PreviousID() - if ok { - return &prev, nil - } - - return nil, nil -} - -func (w *searchSvcWrapper) splitMembers(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - fs := objectSDK.SearchFilters{} - fs.AddSplitIDFilter(objectSDK.MatchStringEqual, exec.splitInfo.SplitID()) - - wr := new(simpleIDWriter) - - p := searchsvc.Prm{} - p.SetWriter(wr) - p.SetCommonParameters(exec.commonParameters()) - p.WithContainerID(exec.containerID()) - p.WithSearchFilters(fs) - - err := w.s.Search(ctx, p) - if err != nil { - return nil, err - } - - return wr.ids, nil -} - -func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error { - s.ids = append(s.ids, ids...) - - return nil -} - -func (w *putSvcWrapper) put(ctx context.Context, exec *execCtx) (*oid.ID, error) { - streamer, err := w.s.Put() - if err != nil { - return nil, err - } - - payload := exec.tombstoneObj.Payload() - - initPrm := new(putsvc.PutInitPrm). - WithCommonPrm(exec.commonParameters()). - WithObject(exec.tombstoneObj.CutPayload()) - - err = streamer.Init(ctx, initPrm) - if err != nil { - return nil, err - } - - err = streamer.SendChunk(ctx, new(putsvc.PutChunkPrm).WithChunk(payload)) - if err != nil { - return nil, err - } - - r, err := streamer.Close(ctx) - if err != nil { - return nil, err - } - - id := r.ObjectID() - - return &id, nil -} diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go deleted file mode 100644 index 7146f0361..000000000 --- a/pkg/services/object/delete/v2/service.go +++ /dev/null @@ -1,40 +0,0 @@ -package deletesvc - -import ( - "context" - - deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -// Service implements Delete operation of Object service v2. -type Service struct { - svc *deletesvc.Service -} - -// NewService constructs Service instance from provided options. -func NewService(svc *deletesvc.Service) *Service { - return &Service{ - svc: svc, - } -} - -// Delete calls internal service. 
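putSvcWrapper.put above shows the complete put-streamer lifecycle: Init with the cut-payload header, SendChunk with the payload, and Close to obtain the object ID. The same sequence as a standalone sketch, assuming a ready *putsvc.Service and the imports used above:

// storeObject stores an arbitrary object through the put service,
// following the exact sequence used by putSvcWrapper.put.
func storeObject(ctx context.Context, s *putsvc.Service, obj *objectSDK.Object, common *util.CommonPrm) (oid.ID, error) {
	streamer, err := s.Put()
	if err != nil {
		return oid.ID{}, err
	}

	payload := obj.Payload()

	initPrm := new(putsvc.PutInitPrm).
		WithCommonPrm(common).
		WithObject(obj.CutPayload()) // header only; the payload is streamed below

	if err := streamer.Init(ctx, initPrm); err != nil {
		return oid.ID{}, err
	}
	if err := streamer.SendChunk(ctx, new(putsvc.PutChunkPrm).WithChunk(payload)); err != nil {
		return oid.ID{}, err
	}

	r, err := streamer.Close(ctx)
	if err != nil {
		return oid.ID{}, err
	}
	return r.ObjectID(), nil
}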
-func (s *Service) Delete(ctx context.Context, req *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - resp := new(objectV2.DeleteResponse) - - body := new(objectV2.DeleteResponseBody) - resp.SetBody(body) - - p, err := s.toPrm(req, body) - if err != nil { - return nil, err - } - - err = s.svc.Delete(ctx, *p) - if err != nil { - return nil, err - } - - return resp, nil -} diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go deleted file mode 100644 index c57d4562a..000000000 --- a/pkg/services/object/delete/v2/util.go +++ /dev/null @@ -1,54 +0,0 @@ -package deletesvc - -import ( - "errors" - "fmt" - - deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type tombstoneBodyWriter struct { - body *objectV2.DeleteResponseBody -} - -func (s *Service) toPrm(req *objectV2.DeleteRequest, respBody *objectV2.DeleteResponseBody) (*deletesvc.Prm, error) { - body := req.GetBody() - - addrV2 := body.GetAddress() - if addrV2 == nil { - return nil, errors.New("missing object address") - } - - var addr oid.Address - - err := addr.ReadFromV2(*addrV2) - if err != nil { - return nil, fmt.Errorf("invalid object address: %w", err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - p := new(deletesvc.Prm) - p.SetCommonParameters(commonPrm) - - p.WithAddress(addr) - p.WithTombstoneAddressTarget(&tombstoneBodyWriter{ - body: respBody, - }) - - return p, nil -} - -func (w *tombstoneBodyWriter) SetAddress(addr oid.Address) { - var addrV2 refs.Address - addr.WriteToV2(&addrV2) - - w.body.SetTombstone(&addrV2) -} diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go deleted file mode 100644 index e80132489..000000000 --- a/pkg/services/object/get/assemble.go +++ /dev/null @@ -1,150 +0,0 @@ -package getsvc - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -func (r *request) assemble(ctx context.Context) { - if !r.canAssembleComplexObject() { - r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) - return - } - - // Access tokens are not expected to be used in the assembly process: - // - there is no requirement to specify child objects in a session/bearer - // token for `GET`/`GETRANGE`/`RANGEHASH` requests in the API protocol, - // and, therefore, their absence in the original request should not be - // considered an error; on the other hand, without a session for every child - // object, it is impossible to correctly attach a bearer token to the newly - // generated requests because the token has not been issued for that node's - // key; - // - the assembly process is expected to be handled on a container node - // only since the introduction of the request forwarding mechanism; such a - // node should have enough rights to get any child object by design. - r.prm.common.ForgetTokens() - - // Do not use forwarding during the assembly stage. - // The request forwarding closure is inherited by the produced - // `execCtx`, so it should be disabled there. 
- r.disableForwarding() - - r.log.Debug(ctx, logs.GetTryingToAssembleTheObject) - - r.prm.common = r.prm.common.WithLocalOnly(false) - assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly()) - - r.log.Debug(ctx, logs.GetAssemblingSplittedObject, - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted, - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - - obj, err := assembler.Assemble(ctx, r.prm.objWriter) - if err != nil { - r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject, - zap.Error(err), - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - } - - var errSplitInfo *objectSDK.SplitInfoError - var errRemovedRemote *apistatus.ObjectAlreadyRemoved - var errOutOfRangeRemote *apistatus.ObjectOutOfRange - var errRemovedLocal *apistatus.ObjectAlreadyRemoved - var errOutOfRangeLocal *apistatus.ObjectOutOfRange - - switch { - default: - r.status = statusUndefined - r.err = err - case err == nil: - r.status = statusOK - r.err = nil - r.collectedObject = obj - case errors.As(err, &errRemovedRemote): - r.status = statusINHUMED - r.err = errRemovedRemote - case errors.As(err, &errRemovedLocal): - r.status = statusINHUMED - r.err = errRemovedLocal - case errors.As(err, &errSplitInfo): - r.status = statusVIRTUAL - r.err = errSplitInfo - case errors.As(err, &errOutOfRangeRemote): - r.status = statusOutOfRange - r.err = errOutOfRangeRemote - case errors.As(err, &errOutOfRangeLocal): - r.status = statusOutOfRange - r.err = errOutOfRangeLocal - } -} - -func equalAddresses(a, b oid.Address) bool { - return a.Container().Equals(b.Container()) && a.Object().Equals(b.Object()) -} - -func (r *request) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) { - w := NewSimpleObjectWriter() - - p := RequestParameters{} - p.common = p.common.WithLocalOnly(false) - p.addr.SetContainer(r.containerID()) - p.addr.SetObject(id) - p.head = true - p.SetHeaderWriter(w) - - if err := r.getObjectWithIndependentRequest(ctx, p); err != nil { - return nil, err - } - - return w.Object(), nil -} - -func (r *request) GetObjectAndWritePayload(ctx context.Context, id oid.ID, rng *objectSDK.Range, writer ChunkWriter) (*objectSDK.Object, error) { - w := &payloadWriter{ - origin: writer, - } - - p := r.prm - p.objWriter = w - p.rng = rng - - p.addr.SetContainer(r.containerID()) - p.addr.SetObject(id) - - if err := r.getObjectWithIndependentRequest(ctx, p); err != nil { - return nil, err - } - return w.obj, nil -} - -func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm RequestParameters) error { - detachedExecutor := &request{ - keyStore: r.keyStore, - traverserGenerator: r.traverserGenerator, - remoteStorageConstructor: r.remoteStorageConstructor, - epochSource: r.epochSource, - localStorage: r.localStorage, - containerSource: r.containerSource, - - prm: prm, - infoSplit: objectSDK.NewSplitInfo(), - infoEC: newECInfo(), - log: r.log, - } - - detachedExecutor.execute(ctx) - - return detachedExecutor.err -} diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go deleted file mode 100644 index 59dd7fd93..000000000 --- a/pkg/services/object/get/assembleec.go +++ /dev/null @@ -1,88 +0,0 @@ -package getsvc - -import ( - "context" - "errors" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -func (r *request) assembleEC(ctx context.Context) { - if r.isRaw() { - r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) - return - } - - // Any access tokens are not expected to be used in the assembly process: - // - there is no requirement to specify child objects in session/bearer - // token for `GET`/`GETRANGE`/`RANGEHASH` requests in the API protocol, - // and, therefore, their missing in the original request should not be - // considered as error; on the other hand, without session for every child - // object, it is impossible to attach bearer token in the new generated - // requests correctly because the token has not been issued for that node's - // key; - // - the assembly process is expected to be handled on a container node - // only since the requests forwarding mechanism presentation; such the - // node should have enough rights for getting any child object by design. - r.prm.common.ForgetTokens() - - // Do not use forwarding during assembly stage. - // Request forwarding closure inherited in produced - // `execCtx` so it should be disabled there. - r.disableForwarding() - - r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject) - - // initialize epoch number - ok := r.initEpoch(ctx) - if !ok { - return - } - - r.prm.common = r.prm.common.WithLocalOnly(false) - assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch) - - r.log.Debug(ctx, logs.GetAssemblingECObject, - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted, - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - - obj, err := assembler.Assemble(ctx, r.prm.objWriter) - if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) { - r.log.Warn(ctx, logs.GetFailedToAssembleECObject, - zap.Error(err), - zap.Uint64("range_offset", r.ctxRange().GetOffset()), - zap.Uint64("range_length", r.ctxRange().GetLength()), - ) - } - - var errRemoved *apistatus.ObjectAlreadyRemoved - var errOutOfRange *apistatus.ObjectOutOfRange - var errECInfo *objectSDK.ECInfoError - - switch { - default: - r.status = statusUndefined - r.err = err - case err == nil: - r.status = statusOK - r.err = nil - r.collectedObject = obj - case errors.As(err, &errRemoved): - r.status = statusINHUMED - r.err = errRemoved - case errors.As(err, &errOutOfRange): - r.status = statusOutOfRange - r.err = errOutOfRange - case errors.As(err, &errECInfo): - r.status = statusEC - r.err = err - } -} diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go deleted file mode 100644 index b24c9417b..000000000 --- a/pkg/services/object/get/assembler.go +++ /dev/null @@ -1,233 +0,0 @@ -package getsvc - -import ( - "context" - "slices" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type objectGetter interface { - GetObjectAndWritePayload(ctx context.Context, id oid.ID, rng *objectSDK.Range, writer ChunkWriter) (*objectSDK.Object, 
error) - HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) -} - -type assembler struct { - addr oid.Address - splitInfo *objectSDK.SplitInfo - rng *objectSDK.Range - objGetter objectGetter - head bool - - currentOffset uint64 - - parentObject *objectSDK.Object -} - -func newAssembler( - addr oid.Address, - splitInfo *objectSDK.SplitInfo, - rng *objectSDK.Range, - objGetter objectGetter, - head bool, -) *assembler { - return &assembler{ - addr: addr, - rng: rng, - splitInfo: splitInfo, - objGetter: objGetter, - head: head, - } -} - -// Assemble assembles a split large object and writes its content to ObjectWriter. -// It returns the parent object. -func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - if a.head { - return a.assembleHeader(ctx, writer) - } - sourceObjectID, ok := a.getLastPartOrLinkObjectID() - if !ok { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - previousID, childrenIDs, err := a.initializeFromSourceObjectID(ctx, sourceObjectID) - if err != nil { - return nil, err - } - if previousID == nil && len(childrenIDs) == 0 { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - - if len(childrenIDs) > 0 { - if a.rng != nil { - err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer) - } else { - err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer) - } - } else { - if a.rng != nil { - err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer) - } else { - err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer) - } - } - if err != nil { - return nil, err - } - return a.parentObject, nil -} - -func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) { - sourceObjectID, ok := a.splitInfo.Link() - if ok { - return sourceObjectID, true - } - sourceObjectID, ok = a.splitInfo.LastPart() - if ok { - return sourceObjectID, true - } - return oid.ID{}, false -} - -func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID) (*oid.ID, []oid.ID, error) { - w := NewSimpleObjectWriter() - sourceObject, err := a.getChildObject(ctx, id, nil, true, w) - if err != nil { - return nil, nil, err - } - sourceObject.SetPayload(w.pld) - - parentObject := sourceObject.Parent() - if parentObject == nil { - return nil, nil, errChildWithEmptyParent - } - - a.parentObject = parentObject - - var payload []byte - - if a.rng != nil { - seekOff := a.rng.GetOffset() - seekLen := a.rng.GetLength() - seekTo := seekOff + seekLen - parentSize := parentObject.PayloadSize() - - if seekTo < seekOff || parentSize < seekOff || parentSize < seekTo { - return nil, nil, &apistatus.ObjectOutOfRange{} - } - - sourceSize := sourceObject.PayloadSize() - - a.currentOffset = parentSize - sourceSize - - from := uint64(0) - if a.currentOffset < seekOff { - from = seekOff - a.currentOffset - } - - to := uint64(0) - if seekOff+seekLen >= a.currentOffset+from { - to = seekOff + seekLen - a.currentOffset - } - - payload = sourceObject.Payload()[from:to] - a.rng.SetLength(a.rng.GetLength() - to + from) - } else { - payload = sourceObject.Payload() - } - - a.parentObject.SetPayload(payload) - - idPrev, ok := sourceObject.PreviousID() - if ok { - return &idPrev, sourceObject.Children(), nil - } - - return nil, sourceObject.Children(), nil -} - -func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSDK.Range, verifyIsChild bool, writer ChunkWriter) (*objectSDK.Object, error) { - obj, err := a.objGetter.GetObjectAndWritePayload(ctx, id, rng, writer) - if err != 
nil { - return nil, err - } - - if verifyIsChild && !a.isChild(obj) { - return nil, errParentAddressDiffers - } - return obj, nil -} - -func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } - return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true) -} - -func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } - if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil { - return err - } - if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part - return err - } - return nil -} - -func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error { - for i := range partIDs { - _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer) - if err != nil { - return err - } - } - return nil -} - -func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, err := a.buildChain(ctx, prevID) - if err != nil { - return err - } - - slices.Reverse(chain) - return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) -} - -func (a *assembler) isChild(obj *objectSDK.Object) bool { - parent := obj.Parent() - return parent == nil || equalAddresses(a.addr, object.AddressOf(parent)) -} - -func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) { - var ( - chain []oid.ID - - hasPrev = true - ) - - // fill the chain end-to-start - for hasPrev { - head, err := a.objGetter.HeadObject(ctx, prevID) - if err != nil { - return nil, err - } - if !a.isChild(head) { - return nil, errParentAddressDiffers - } - - id, _ := head.ID() - chain = append(chain, id) - - prevID, hasPrev = head.PreviousID() - } - - return chain, nil -} diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go deleted file mode 100644 index ff213cb82..000000000 --- a/pkg/services/object/get/assembler_head.go +++ /dev/null @@ -1,45 +0,0 @@ -package getsvc - -import ( - "context" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - var sourceObjectIDs []oid.ID - sourceObjectID, ok := a.splitInfo.Link() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - sourceObjectID, ok = a.splitInfo.LastPart() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - if len(sourceObjectIDs) == 0 { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - for _, sourceObjectID = range sourceObjectIDs { - obj, err := a.getParent(ctx, sourceObjectID, writer) - if err == nil { - return obj, nil - } - } - return nil, objectSDK.NewSplitInfoError(a.splitInfo) -} - -func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) - if err != nil { - return nil, err - } - parent := obj.Parent() - if parent == nil { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - if err := writer.WriteHeader(ctx, parent); err != nil { - return nil, err - } 
- return obj, nil -} diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go deleted file mode 100644 index 780693c40..000000000 --- a/pkg/services/object/get/assembler_range.go +++ /dev/null @@ -1,87 +0,0 @@ -package getsvc - -import ( - "context" - "slices" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { - return err - } - return writer.WriteChunk(ctx, a.parentObject.Payload()) -} - -func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil { - return err - } - if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part - return err - } - return nil -} - -func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error { - for i := range partIDs { - _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer) - if err != nil { - return err - } - } - return nil -} - -func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, rngs, err := a.buildChainRange(ctx, prevID) - if err != nil { - return err - } - - slices.Reverse(chain) - slices.Reverse(rngs) - return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) -} - -func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { - var ( - chain []oid.ID - rngs []objectSDK.Range - from = a.rng.GetOffset() - to = from + a.rng.GetLength() - - hasPrev = true - ) - - // fill the chain end-to-start - for hasPrev && from < a.currentOffset { - head, err := a.objGetter.HeadObject(ctx, prevID) - if err != nil { - return nil, nil, err - } - if !a.isChild(head) { - return nil, nil, errParentAddressDiffers - } - - nextOffset := a.currentOffset - head.PayloadSize() - clampedFrom := max(from, nextOffset) - clampedTo := min(to, a.currentOffset) - if clampedFrom < clampedTo { - index := len(rngs) - rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(clampedFrom - nextOffset) - rngs[index].SetLength(clampedTo - clampedFrom) - - id, _ := head.ID() - chain = append(chain, id) - } - - a.currentOffset = nextOffset - prevID, hasPrev = head.PreviousID() - } - - return chain, rngs, nil -} diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go deleted file mode 100644 index e0a7e1da6..000000000 --- a/pkg/services/object/get/assemblerec.go +++ /dev/null @@ -1,297 +0,0 @@ -package getsvc - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -var errECPartsRetrieveCompleted = errors.New("EC parts receive completed") - -type ecRemoteStorage interface { - getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error) - headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error) -} - -type assemblerec struct { - addr oid.Address - ecInfo *ecInfo - rng *objectSDK.Range - remoteStorage ecRemoteStorage - localStorage localStorage - log *logger.Logger - head bool - traverserGenerator traverserGenerator - epoch uint64 -} - -func newAssemblerEC( - addr oid.Address, - ecInfo *ecInfo, - rng *objectSDK.Range, - remoteStorage ecRemoteStorage, - localStorage localStorage, - log *logger.Logger, - head bool, - tg traverserGenerator, - epoch uint64, -) *assemblerec { - return &assemblerec{ - addr: addr, - rng: rng, - ecInfo: ecInfo, - remoteStorage: remoteStorage, - localStorage: localStorage, - log: log, - head: head, - traverserGenerator: tg, - epoch: epoch, - } -} - -// Assemble assembles erasure-coded object and writes it's content to ObjectWriter. -// It returns parent object. -func (a *assemblerec) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - switch { - case a.head: - return a.reconstructHeader(ctx, writer) - case a.rng != nil: - return a.reconstructRange(ctx, writer) - default: - return a.reconstructObject(ctx, writer) - } -} - -func (a *assemblerec) getConstructor(cnr *container.Container) (*erasurecode.Constructor, error) { - dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy()) - parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy()) - return erasurecode.NewConstructor(dataCount, parityCount) -} - -func (a *assemblerec) reconstructHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.reconstructObjectFromParts(ctx, true) - if err == nil { - return obj, writer.WriteHeader(ctx, obj) - } - return nil, err -} - -func (a *assemblerec) reconstructRange(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.reconstructObjectFromParts(ctx, false) - if err != nil { - return nil, err - } - - from := a.rng.GetOffset() - to := from + a.rng.GetLength() - if pLen := uint64(len(obj.Payload())); to < from || pLen < from || pLen < to { - return nil, &apistatus.ObjectOutOfRange{} - } - err = writer.WriteChunk(ctx, obj.Payload()[from:to]) - if err != nil { - return nil, err - } - return obj, err -} - -func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.reconstructObjectFromParts(ctx, false) - if err == nil { - err = writer.WriteHeader(ctx, obj.CutPayload()) - if err == nil { - err = writer.WriteChunk(ctx, obj.Payload()) - if err != nil { - return nil, err - } - } - } - return obj, err -} - -func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) { - objID := a.addr.Object() - trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch) - if err != nil { - return nil, err - } - c, err := a.getConstructor(cnr) - if err != nil { - return nil, err - } - parts := a.retrieveParts(ctx, trav, cnr) - if headers { - return 
c.ReconstructHeader(parts) - } - return c.Reconstruct(parts) -} - -func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Traverser, cnr *container.Container) []*objectSDK.Object { - dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy()) - parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy()) - - remoteNodes := make([]placement.Node, 0) - for { - batch := trav.Next() - if len(batch) == 0 { - break - } - remoteNodes = append(remoteNodes, batch...) - } - - parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount) - if err != nil { - a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err)) - } - return parts -} - -func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placement.Node, dataCount, parityCount int) ([]*objectSDK.Object, error) { - foundChunks := make(map[uint32]*objectSDK.Object) - var foundChunksGuard sync.Mutex - eg, ctx := errgroup.WithContext(ctx) - eg.SetLimit(dataCount) - - for _, ch := range a.ecInfo.localChunks { - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - object := a.tryGetChunkFromLocalStorage(ctx, ch) - if object == nil { - return nil - } - foundChunksGuard.Lock() - foundChunks[ch.Index] = object - count := len(foundChunks) - foundChunksGuard.Unlock() - if count >= dataCount { - return errECPartsRetrieveCompleted - } - return nil - }) - } - - for _, node := range nodes { - var info client.NodeInfo - client.NodeInfoFromNetmapElement(&info, node) - eg.Go(func() error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - chunks := a.tryGetChunkListFromNode(ctx, info) - for _, ch := range chunks { - object := a.tryGetChunkFromRemoteStorage(ctx, info, ch) - if object == nil { - continue - } - foundChunksGuard.Lock() - foundChunks[ch.Index] = object - count := len(foundChunks) - foundChunksGuard.Unlock() - if count >= dataCount { - return errECPartsRetrieveCompleted - } - } - return nil - }) - } - err := eg.Wait() - if err == nil || errors.Is(err, errECPartsRetrieveCompleted) { - parts := make([]*objectSDK.Object, dataCount+parityCount) - for idx, chunk := range foundChunks { - parts[idx] = chunk - } - return parts, nil - } - return nil, err -} - -func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch objectSDK.ECChunk) *objectSDK.Object { - var objID oid.ID - err := objID.ReadFromV2(ch.ID) - if err != nil { - a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) - return nil - } - var addr oid.Address - addr.SetContainer(a.addr.Container()) - addr.SetObject(objID) - var object *objectSDK.Object - if a.head { - object, err = a.localStorage.Head(ctx, addr, false) - if err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - } - } else { - object, err = a.localStorage.Get(ctx, addr) - if err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - } - } - return object -} - -func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.NodeInfo) []objectSDK.ECChunk { - if chunks, found := a.ecInfo.remoteChunks[string(node.PublicKey())]; found { - return chunks - } - var errECInfo *objectSDK.ECInfoError - _, err := 
a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true) - if err == nil { - a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey()))) - return nil - } - if !errors.As(err, &errECInfo) { - a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) - return nil - } - result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks)) - for _, ch := range errECInfo.ECInfo().Chunks { - result = append(result, objectSDK.ECChunk(ch)) - } - return result -} - -func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node client.NodeInfo, ch objectSDK.ECChunk) *objectSDK.Object { - var objID oid.ID - err := objID.ReadFromV2(ch.ID) - if err != nil { - a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) - return nil - } - var addr oid.Address - addr.SetContainer(a.addr.Container()) - addr.SetObject(objID) - var object *objectSDK.Object - if a.head { - object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) - if err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - } - } else { - object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) - if err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - } - } - return object -} diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go deleted file mode 100644 index dfb31133c..000000000 --- a/pkg/services/object/get/container.go +++ /dev/null @@ -1,90 +0,0 @@ -package getsvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "go.uber.org/zap" -) - -func (r *request) executeOnContainer(ctx context.Context) { - if r.isLocal() { - r.log.Debug(ctx, logs.GetReturnResultDirectly) - return - } - - lookupDepth := r.netmapLookupDepth() - - r.log.Debug(ctx, logs.TryingToExecuteInContainer, - zap.Uint64("netmap lookup depth", lookupDepth), - ) - - // initialize epoch number - ok := r.initEpoch(ctx) - if !ok { - return - } - - localStatus := r.status - - for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { - lookupDepth-- - - // go to the previous epoch - r.curProcEpoch-- - } -} - -func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool { - r.log.Debug(ctx, logs.ProcessEpoch, - zap.Uint64("number", r.curProcEpoch), - ) - - traverser, ok := r.generateTraverser(ctx, r.address()) - if !ok { - return true - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if localStatus == statusEC { // possible only for raw == true and local == false - r.status = statusEC - } else { - r.status = statusUndefined - } - - for { - addrs := traverser.Next() - if len(addrs) == 0 { - r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) - - return false - } - - for i := range addrs { - select { - case <-ctx.Done(): - r.log.Debug(ctx, logs.InterruptPlacementIterationByContext, - zap.Error(ctx.Err()), - ) - - return true - default: - } - - // TODO: #1142 consider parallel execution - // TODO: #1142 consider 
optimization: if status == SPLIT we can continue until - // we reach the best result - split info with linking object ID. - var info client.NodeInfo - - client.NodeInfoFromNetmapElement(&info, addrs[i]) - - if r.processNode(ctx, info) { - r.log.Debug(ctx, logs.GetCompletingTheOperation) - return true - } - } - } -} diff --git a/pkg/services/object/get/errors.go b/pkg/services/object/get/errors.go deleted file mode 100644 index 6ea16a144..000000000 --- a/pkg/services/object/get/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package getsvc - -import "errors" - -var ( - errRangeZeroLength = errors.New("zero range length") - errRangeOverflow = errors.New("range overflow") - errChildWithEmptyParent = errors.New("received child with empty parent") - errParentAddressDiffers = errors.New("parent address in child object differs") -) diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go deleted file mode 100644 index 3a50308c2..000000000 --- a/pkg/services/object/get/get.go +++ /dev/null @@ -1,137 +0,0 @@ -package getsvc - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -// Get serves a request to get an object by address, and returns Streamer instance. -func (s *Service) Get(ctx context.Context, prm Prm) error { - return s.get(ctx, RequestParameters{ - commonPrm: prm.commonPrm, - }) -} - -// GetRange serves a request to get an object by address, and returns Streamer instance. -func (s *Service) GetRange(ctx context.Context, prm RangePrm) error { - return s.get(ctx, RequestParameters{ - commonPrm: prm.commonPrm, - rng: prm.rng, - }) -} - -func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHashRes, error) { - hashes := make([][]byte, 0, len(prm.rngs)) - - for _, rng := range prm.rngs { - h := prm.hashGen() - - // For big ranges we could fetch range-hashes from different nodes and concatenate them locally. - // However, - // 1. Potential gains are insignificant when operating in the Internet given typical latencies and losses. - // 2. Parallel solution is more complex in terms of code. - // 3. TZ-hash is likely to be disabled in private installations. - reqPrm := RequestParameters{ - commonPrm: prm.commonPrm, - rng: &rng, - } - reqPrm.SetChunkWriter(&hasherWrapper{ - hash: util.NewSaltingWriter(h, prm.salt), - }) - - if err := s.get(ctx, reqPrm); err != nil { - return nil, err - } - - hashes = append(hashes, h.Sum(nil)) - } - - return &RangeHashRes{ - hashes: hashes, - }, nil -} - -// Head reads object header from container. -// -// Returns ErrNotFound if the header was not received for the call. -// Returns SplitInfoError if object is virtual and raw flag is set. 
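With the raw flag set, a Head of a virtual object reports the split metadata through the error value rather than writing a header. A small caller-side sketch of unwrapping it (getsvc and objectSDK imports assumed; the VIRTUAL test case further below exercises the same path):

// headRaw returns split info when the addressed object is virtual.
func headRaw(ctx context.Context, svc *getsvc.Service, prm getsvc.HeadPrm) (*objectSDK.SplitInfo, error) {
	err := svc.Head(ctx, prm) // prm is expected to carry the raw flag

	var splitErr *objectSDK.SplitInfoError
	if errors.As(err, &splitErr) {
		return splitErr.SplitInfo(), nil // virtual object: split info instead of a header
	}
	return nil, err // nil means the header was written to prm's header writer
}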
-func (s *Service) Head(ctx context.Context, prm HeadPrm) error { - return s.get(ctx, RequestParameters{ - head: true, - commonPrm: prm.commonPrm, - }) -} - -func (s *Service) get(ctx context.Context, prm RequestParameters) error { - exec := &request{ - keyStore: s.keyStore, - traverserGenerator: s.traverserGenerator, - remoteStorageConstructor: s.remoteStorageConstructor, - epochSource: s.epochSource, - localStorage: s.localStorage, - containerSource: s.containerSource, - - prm: prm, - infoSplit: objectSDK.NewSplitInfo(), - infoEC: newECInfo(), - log: s.log, - } - - exec.setLogger(s.log) - - exec.execute(ctx) - - return exec.err -} - -func (r *request) execute(ctx context.Context) { - r.log.Debug(ctx, logs.ServingRequest) - - // perform local operation - r.executeLocal(ctx) - - r.analyzeStatus(ctx, true) -} - -func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { - // analyze local result - switch r.status { - case statusOK: - r.log.Debug(ctx, logs.OperationFinishedSuccessfully) - case statusINHUMED: - r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) - case statusVIRTUAL: - r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) - r.assemble(ctx) - case statusOutOfRange: - r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) - case statusEC: - r.log.Debug(ctx, logs.GetRequestedObjectIsEC) - if r.isRaw() && execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) - } - r.assembleEC(ctx) - default: - r.log.Debug(ctx, logs.OperationFinishedWithError, - zap.Error(r.err), - ) - var errAccessDenied *apistatus.ObjectAccessDenied - if execCnr && errors.As(r.err, &errAccessDenied) { - // Local get can't return access denied error, so this error was returned by - // write to the output stream. So there is no need to try to find object on other nodes. 
- return - } - - if execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) - } - } -} diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go deleted file mode 100644 index 3efc72065..000000000 --- a/pkg/services/object/get/get_test.go +++ /dev/null @@ -1,1949 +0,0 @@ -package getsvc - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/rand" - "errors" - "fmt" - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - netmaptest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type testStorage struct { - inhumed map[string]struct{} - - virtual map[string]*objectSDK.SplitInfo - - phy map[string]*objectSDK.Object -} - -type testTraverserGenerator struct { - c container.Container - b map[uint64]placement.Builder -} - -type testPlacementBuilder struct { - vectors map[string][][]netmap.NodeInfo -} - -type testClientCache struct { - clients map[string]*testClient -} - -type testClient struct { - results map[string]struct { - obj *objectSDK.Object - err error - } -} - -type testEpochReceiver uint64 - -func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { - return uint64(e), nil -} - -func (e testEpochReceiver) CurrentEpoch() uint64 { - return uint64(e) -} - -func newTestStorage() *testStorage { - return &testStorage{ - inhumed: make(map[string]struct{}), - virtual: make(map[string]*objectSDK.SplitInfo), - phy: make(map[string]*objectSDK.Object), - } -} - -func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { - opts := make([]placement.Option, 0, 4) - opts = append(opts, - placement.ForContainer(g.c), - placement.UseBuilder(g.b[e]), - placement.SuccessAfter(1), - ) - - if obj != nil { - opts = append(opts, placement.ForObject(*obj)) - } - - t, err := placement.NewTraverser(context.Background(), opts...) 
- return t, &containerCore.Container{ - Value: g.c, - }, err -} - -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { - var addr oid.Address - addr.SetContainer(cnr) - - if obj != nil { - addr.SetObject(*obj) - } - - vs, ok := p.vectors[addr.EncodeToString()] - if !ok { - return nil, errors.New("vectors for address not found") - } - - return vs, nil -} - -func (c *testClientCache) Get(info client.NodeInfo) (remoteStorage, error) { - v, ok := c.clients[network.StringifyGroup(info.AddressGroup())] - if !ok { - return nil, errors.New("could not construct client") - } - - return v, nil -} - -func newTestClient() *testClient { - return &testClient{ - results: map[string]struct { - obj *objectSDK.Object - err error - }{}, - } -} - -func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err error) { - c.results[addr.EncodeToString()] = struct { - obj *objectSDK.Object - err error - }{obj: obj, err: err} -} - -func (c *testClient) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - v, ok := c.results[address.EncodeToString()] - if !ok { - return nil, new(apistatus.ObjectNotFound) - } - - if v.err != nil { - return nil, v.err - } - - return v.obj, nil -} - -func (c *testClient) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - return c.Get(ctx, address, requestParams) -} - -func (c *testClient) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - obj, err := c.Get(ctx, address, requestParams) - if err != nil { - return nil, err - } - return cutToRange(obj, rng), nil -} - -func (c *testClient) ForwardRequest(ctx context.Context, info client.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) { - return nil, fmt.Errorf("not implemented") -} - -func (s *testStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) { - return s.Range(ctx, address, nil) -} - -func (s *testStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) { - return s.Range(ctx, address, nil) -} - -func (s *testStorage) Range(_ context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) { - var ( - ok bool - obj *objectSDK.Object - sAddr = address.EncodeToString() - ) - - if _, ok = s.inhumed[sAddr]; ok { - return nil, new(apistatus.ObjectAlreadyRemoved) - } - - if info, ok := s.virtual[sAddr]; ok { - return nil, objectSDK.NewSplitInfoError(info) - } - - if obj, ok = s.phy[sAddr]; ok { - return cutToRange(obj, rng), nil - } - - return nil, new(apistatus.ObjectNotFound) -} - -func cutToRange(o *objectSDK.Object, rng *objectSDK.Range) *objectSDK.Object { - if rng == nil { - return o - } - - from := rng.GetOffset() - to := from + rng.GetLength() - - payload := o.Payload() - - o = o.CutPayload() - o.SetPayload(payload[from:to]) - - return o -} - -func (s *testStorage) addPhy(addr oid.Address, obj *objectSDK.Object) { - s.phy[addr.EncodeToString()] = obj -} - -func (s *testStorage) addVirtual(addr oid.Address, info *objectSDK.SplitInfo) { - s.virtual[addr.EncodeToString()] = info -} - -func (s *testStorage) inhume(addr oid.Address) { - s.inhumed[addr.EncodeToString()] = struct{}{} -} - -func generateObject(addr oid.Address, prev *oid.ID, payload []byte, children ...oid.ID) *objectSDK.Object { - obj := objectSDK.New() - 
obj.SetContainerID(addr.Container()) - obj.SetID(addr.Object()) - obj.SetPayload(payload) - obj.SetPayloadSize(uint64(len(payload))) - if prev != nil { - obj.SetPreviousID(*prev) - } - obj.SetChildren(children...) - - return obj -} - -type writeHeaderError struct{} - -func (whe *writeHeaderError) Error() string { - return "write header error" -} - -type writeHeaderErrorObjectWriter struct{} - -func (w *writeHeaderErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error { - return &writeHeaderError{} -} - -func (w *writeHeaderErrorObjectWriter) WriteChunk(_ context.Context, _ []byte) error { - return nil -} - -type writePayloadError struct{} - -func (whe *writePayloadError) Error() string { - return "write payload error" -} - -type writePayloadErrorObjectWriter struct{} - -func (w *writePayloadErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error { - return nil -} - -func (w *writePayloadErrorObjectWriter) WriteChunk(_ context.Context, _ []byte) error { - return &writePayloadError{} -} - -type testKeyStorage struct{} - -func (ks *testKeyStorage) GetKey(_ *util.SessionInfo) (*ecdsa.PrivateKey, error) { - return &ecdsa.PrivateKey{}, nil -} - -func TestGetLocalOnly(t *testing.T) { - ctx := context.Background() - - newSvc := func(storage *testStorage) *Service { - return &Service{ - log: test.NewLogger(t), - localStorage: storage, - } - } - - newPrm := func(raw bool, w ObjectWriter) Prm { - p := Prm{} - p.SetObjectWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm).WithLocalOnly(true) - - return p - } - - newRngPrm := func(raw bool, w ChunkWriter, off, ln uint64) RangePrm { - p := RangePrm{} - p.SetChunkWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm).WithLocalOnly(true) - - r := objectSDK.NewRange() - r.SetOffset(off) - r.SetLength(ln) - - p.SetRange(r) - - return p - } - - newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm { - p := HeadPrm{} - p.SetHeaderWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm).WithLocalOnly(true) - - return p - } - - t.Run("OK", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - w := NewSimpleObjectWriter() - p := newPrm(false, w) - - payloadSz := uint64(10) - payload := make([]byte, payloadSz) - rand.Read(payload) - - addr := oidtest.Address() - obj := generateObject(addr, nil, payload) - - storage.addPhy(addr, obj) - - p.WithAddress(addr) - - err := svc.Get(ctx, p) - - require.NoError(t, err) - - require.Equal(t, obj, w.Object()) - - w = NewSimpleObjectWriter() - - rngPrm := newRngPrm(false, w, payloadSz/3, payloadSz/3) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.NoError(t, err) - require.Equal(t, payload[payloadSz/3:2*payloadSz/3], w.Object().Payload()) - - w = NewSimpleObjectWriter() - headPrm := newHeadPrm(false, w) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.NoError(t, err) - require.Equal(t, obj.CutPayload(), w.Object()) - }) - - t.Run("INHUMED", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - p := newPrm(false, nil) - - addr := oidtest.Address() - - storage.inhume(addr) - - p.WithAddress(addr) - - err := svc.Get(ctx, p) - - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - - headPrm := newHeadPrm(false, nil) - headPrm.WithAddress(addr) - - err =
svc.Head(ctx, headPrm) - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - }) - - t.Run("404", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - p := newPrm(false, nil) - - addr := oidtest.Address() - - p.WithAddress(addr) - - err := svc.Get(ctx, p) - - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - headPrm := newHeadPrm(false, nil) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("VIRTUAL", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - p := newPrm(true, nil) - - addr := oidtest.Address() - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetSplitID(objectSDK.NewSplitID()) - splitInfo.SetLink(oidtest.ID()) - splitInfo.SetLastPart(oidtest.ID()) - - p.WithAddress(addr) - - storage.addVirtual(addr, splitInfo) - - err := svc.Get(ctx, p) - - errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo()) - - require.True(t, errors.As(err, &errSplit)) - - require.Equal(t, splitInfo, errSplit.SplitInfo()) - - rngPrm := newRngPrm(true, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - - require.True(t, errors.As(err, &errSplit)) - - headPrm := newHeadPrm(true, nil) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.True(t, errors.As(err, &errSplit)) - require.Equal(t, splitInfo, errSplit.SplitInfo()) - }) -} - -func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { - mNodes := make([][]netmap.NodeInfo, len(dim)) - mAddr := make([][]string, len(dim)) - - for i := range dim { - ns := make([]netmap.NodeInfo, dim[i]) - as := make([]string, dim[i]) - - for j := range dim[i] { - a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", - strconv.Itoa(i), - strconv.Itoa(60000+j), - ) - - var ni netmap.NodeInfo - ni.SetNetworkEndpoints(a) - ni.SetPublicKey([]byte(a)) - - var na network.AddressGroup - - err := na.FromIterator(netmapcore.Node(ni)) - require.NoError(t, err) - - as[j] = network.StringifyGroup(na) - - ns[j] = ni - } - - mNodes[i] = ns - mAddr[i] = as - } - - return mNodes, mAddr -} - -func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) { - curID := oidtest.ID() - var prevID *oid.ID - - var addr oid.Address - addr.SetContainer(cnr) - - res := make([]*objectSDK.Object, 0, ln) - ids := make([]oid.ID, 0, ln) - payload := make([]byte, 0, ln*10) - - for i := range ln { - ids = append(ids, curID) - addr.SetObject(curID) - - payloadPart := make([]byte, 10) - rand.Read(payloadPart) - - o := generateObject(addr, prevID, []byte{byte(i)}) - o.SetPayload(payloadPart) - o.SetPayloadSize(uint64(len(payloadPart))) - o.SetID(curID) - - payload = append(payload, payloadPart...)
- - res = append(res, o) - - cpCurID := curID - prevID = &cpCurID - curID = oidtest.ID() - } - - return res, ids, payload -} - -func TestGetRemoteSmall(t *testing.T) { - ctx := context.Background() - - var cnr container.Container - cnr.SetPlacementPolicy(netmaptest.PlacementPolicy()) - - var idCnr cid.ID - container.CalculateID(&idCnr, cnr) - - newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service { - const curEpoch = 13 - - return &Service{ - log: test.NewLogger(t), - localStorage: newTestStorage(), - traverserGenerator: &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: b, - }, - }, - epochSource: testEpochReceiver(curEpoch), - remoteStorageConstructor: c, - keyStore: &testKeyStorage{}, - } - } - - newPrm := func(raw bool, w ObjectWriter) Prm { - p := Prm{} - p.SetObjectWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm).WithLocalOnly(false) - - return p - } - - newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm { - p := HeadPrm{} - p.SetHeaderWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm).WithLocalOnly(false) - - return p - } - - t.Run("OK", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - - ns, as := testNodeMatrix(t, []int{2}) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - payloadSz := uint64(10) - payload := make([]byte, payloadSz) - rand.Read(payload) - - obj := generateObject(addr, nil, payload) - - c1 := newTestClient() - c1.addResult(addr, obj, nil) - - c2 := newTestClient() - c2.addResult(addr, nil, errors.New("any error")) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.NoError(t, err) - require.Equal(t, obj, w.Object()) - - *c1, *c2 = *c2, *c1 - - err = svc.Get(ctx, p) - require.NoError(t, err) - require.Equal(t, obj, w.Object()) - - w = NewSimpleObjectWriter() - rngPrm := newRngPrm(false, w, payloadSz/3, payloadSz/3) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.NoError(t, err) - require.Equal(t, payload[payloadSz/3:2*payloadSz/3], w.Object().Payload()) - - w = NewSimpleObjectWriter() - headPrm := newHeadPrm(false, w) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.NoError(t, err) - require.Equal(t, obj.CutPayload(), w.Object()) - }) - - t.Run("INHUMED", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - - ns, as := testNodeMatrix(t, []int{2}) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, new(apistatus.ObjectAlreadyRemoved)) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - p := newPrm(false, nil) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - - headPrm := newHeadPrm(false, nil) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.True(t, clientSDK.IsErrObjectAlreadyRemoved(err)) - }) - - t.Run("404", func(t 
*testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - - ns, as := testNodeMatrix(t, []int{2}) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, errors.New("any error")) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - p := newPrm(false, nil) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - headPrm := newHeadPrm(false, nil) - headPrm.WithAddress(addr) - - err = svc.Head(ctx, headPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("VIRTUAL", func(t *testing.T) { - testHeadVirtual := func(svc *Service, addr oid.Address, i *objectSDK.SplitInfo) { - headPrm := newHeadPrm(true, nil) - headPrm.WithAddress(addr) - - errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo()) - - err := svc.Head(ctx, headPrm) - require.True(t, errors.As(err, &errSplit)) - require.Equal(t, i, errSplit.SplitInfo()) - } - - t.Run("linking", func(t *testing.T) { - t.Run("get linking failure", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - var splitAddr oid.Address - splitAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - splitAddr.SetObject(idLink) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(splitAddr, nil, new(apistatus.ObjectNotFound)) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(splitAddr, nil, new(apistatus.ObjectNotFound)) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - splitAddr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - p := newPrm(false, nil) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("get chain element failure", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - srcObj.SetPayloadSize(10) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, _ := generateChain(2, idCnr) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...) 
- linkingObj.SetParentID(addr.Object()) - linkingObj.SetParent(srcObj) - - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, nil, new(apistatus.ObjectNotFound)) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - p := newPrm(false, NewSimpleObjectWriter()) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, NewSimpleObjectWriter(), 0, 1) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("OK", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, payload := generateChain(2, idCnr) - srcObj.SetPayload(payload) - srcObj.SetPayloadSize(uint64(len(payload))) - children[len(children)-1].SetParent(srcObj) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...) 
- linkingObj.SetParentID(addr.Object()) - linkingObj.SetParent(srcObj) - - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, children[1], nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.NoError(t, err) - require.Equal(t, srcObj, w.Object()) - - w = NewSimpleObjectWriter() - payloadSz := srcObj.PayloadSize() - - off := payloadSz / 3 - ln := payloadSz / 3 - - rngPrm := newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.NoError(t, err) - require.Equal(t, payload[off:off+ln], w.Object().Payload()) - }) - - t.Run("write header/payload failure", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, payload := generateChain(2, idCnr) - srcObj.SetPayload(payload) - srcObj.SetPayloadSize(uint64(len(payload))) - children[len(children)-1].SetParent(srcObj) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...) 
- linkingObj.SetParentID(addr.Object()) - linkingObj.SetParent(srcObj) - - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, children[1], nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - wh := &writeHeaderErrorObjectWriter{} - - p := newPrm(false, wh) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.ErrorAs(t, err, new(*writeHeaderError)) - - wp := &writePayloadErrorObjectWriter{} - - p = newPrm(false, wp) - p.WithAddress(addr) - - err = svc.Get(ctx, p) - require.ErrorAs(t, err, new(*writePayloadError)) - }) - - t.Run("linked object not a child of parent", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, payload := generateChain(2, idCnr) - srcObj.SetPayload(payload) - srcObj.SetPayloadSize(uint64(len(payload))) - children[len(children)-1].SetParent(srcObj) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - wrongParentAddr := oidtest.Address() - wrongParentID := oidtest.ID() - wrongParentAddr.SetObject(wrongParentID) - wrongParentAddr.SetContainer(idCnr) - wrongParent := generateObject(wrongParentAddr, nil, nil) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...) 
- linkingObj.SetParentID(wrongParentID) - linkingObj.SetParent(wrongParent) - - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, children[1], nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.ErrorIs(t, err, errParentAddressDiffers) - - w = NewSimpleObjectWriter() - payloadSz := srcObj.PayloadSize() - - off := payloadSz / 3 - ln := payloadSz / 3 - - rngPrm := newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.ErrorIs(t, err, errParentAddressDiffers) - }) - - t.Run("linked object with parent undefined", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, payload := generateChain(2, idCnr) - srcObj.SetPayload(payload) - srcObj.SetPayloadSize(uint64(len(payload))) - children[len(children)-1].SetParent(srcObj) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...)
- - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, children[1], nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.Error(t, err) - require.ErrorIs(t, err, errChildWithEmptyParent) - - w = NewSimpleObjectWriter() - payloadSz := srcObj.PayloadSize() - - off := payloadSz / 3 - ln := payloadSz / 3 - - rngPrm := newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.Error(t, err) - require.ErrorIs(t, err, errChildWithEmptyParent) - }) - - t.Run("out of range", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLink(oidtest.ID()) - - children, childIDs, payload := generateChain(2, idCnr) - srcObj.SetPayload(payload) - srcObj.SetPayloadSize(uint64(len(payload))) - children[len(children)-1].SetParent(srcObj) - - var linkAddr oid.Address - linkAddr.SetContainer(idCnr) - idLink, _ := splitInfo.Link() - linkAddr.SetObject(idLink) - - linkingObj := generateObject(linkAddr, nil, nil, childIDs...) 
- linkingObj.SetParentID(addr.Object()) - linkingObj.SetParent(srcObj) - - var child1Addr oid.Address - child1Addr.SetContainer(idCnr) - child1Addr.SetObject(childIDs[0]) - - var child2Addr oid.Address - child2Addr.SetContainer(idCnr) - child2Addr.SetObject(childIDs[1]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(linkAddr, nil, errors.New("any error")) - c1.addResult(child1Addr, nil, errors.New("any error")) - c1.addResult(child2Addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(linkAddr, linkingObj, nil) - c2.addResult(child1Addr, children[0], nil) - c2.addResult(child2Addr, children[1], nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - linkAddr.EncodeToString(): ns, - child1Addr.EncodeToString(): ns, - child2Addr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newRngPrm(false, w, uint64(len(payload)), uint64(len(payload))) - p.WithAddress(addr) - - err := svc.GetRange(ctx, p) - require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange)) - }) - }) - - t.Run("right child", func(t *testing.T) { - t.Run("get right child failure", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLastPart(oidtest.ID()) - - var splitAddr oid.Address - splitAddr.SetContainer(idCnr) - idLast, _ := splitInfo.LastPart() - splitAddr.SetObject(idLast) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(splitAddr, nil, new(apistatus.ObjectNotFound)) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(splitAddr, nil, new(apistatus.ObjectNotFound)) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - splitAddr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - p := newPrm(false, nil) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, nil, 0, 0) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("get chain element failure", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - srcObj.SetPayloadSize(11) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLastPart(oidtest.ID()) - - children, _, _ := generateChain(2, idCnr) - - var rightAddr oid.Address - rightAddr.SetContainer(idCnr) - idLast, _ := splitInfo.LastPart() - rightAddr.SetObject(idLast) - - rightObj := children[len(children)-1] - - rightObj.SetParentID(addr.Object()) - rightObj.SetParent(srcObj) - - preRightAddr := object.AddressOf(children[len(children)-2]) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - c1.addResult(rightAddr, nil, errors.New("any error")) - - c2 := newTestClient() - 
c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - c2.addResult(rightAddr, rightObj, nil) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - rightAddr.EncodeToString(): ns, - preRightAddr.EncodeToString(): ns, - }, - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - headSvc := newTestClient() - headSvc.addResult(preRightAddr, nil, new(apistatus.ObjectNotFound)) - - p := newPrm(false, NewSimpleObjectWriter()) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - rngPrm := newRngPrm(false, nil, 0, 1) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - }) - - t.Run("child has different parent", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLastPart(oidtest.ID()) - - children, _, payload := generateChain(2, idCnr) - srcObj.SetPayloadSize(uint64(len(payload))) - srcObj.SetPayload(payload) - - wrongParentAddr := oidtest.Address() - wrongParentID := oidtest.ID() - wrongParentAddr.SetObject(wrongParentID) - wrongParentAddr.SetContainer(idCnr) - wrongParent := generateObject(wrongParentAddr, nil, nil) - - rightObj := children[len(children)-1] - - idLast, _ := splitInfo.LastPart() - rightObj.SetID(idLast) - rightObj.SetParentID(addr.Object()) - rightObj.SetParent(srcObj) - - firstObj := children[0] - firstObj.SetParent(wrongParent) - firstObj.SetParentID(wrongParentID) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - - for i := range children { - c1.addResult(object.AddressOf(children[i]), nil, errors.New("any error")) - } - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - - for i := range children { - c2.addResult(object.AddressOf(children[i]), children[i], nil) - } - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{}, - } - - builder.vectors[addr.EncodeToString()] = ns - - for i := range children { - builder.vectors[object.AddressOf(children[i]).EncodeToString()] = ns - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.ErrorIs(t, err, errParentAddressDiffers) - - w = NewSimpleObjectWriter() - payloadSz := srcObj.PayloadSize() - - off := payloadSz / 3 - ln := payloadSz / 3 - - rngPrm := newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.ErrorIs(t, err, errParentAddressDiffers) - }) - - t.Run("OK", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - srcObj := generateObject(addr, nil, nil) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - splitInfo.SetLastPart(oidtest.ID()) - - children, _, payload := generateChain(2, idCnr) - srcObj.SetPayloadSize(uint64(len(payload))) - srcObj.SetPayload(payload) - - rightObj := children[len(children)-1] - - idLast, _ := splitInfo.LastPart() - rightObj.SetID(idLast) - 
rightObj.SetParentID(addr.Object()) - rightObj.SetParent(srcObj) - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - - for i := range children { - c1.addResult(object.AddressOf(children[i]), nil, errors.New("any error")) - } - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - - for i := range children { - c2.addResult(object.AddressOf(children[i]), children[i], nil) - } - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{}, - } - - builder.vectors[addr.EncodeToString()] = ns - - for i := range children { - builder.vectors[object.AddressOf(children[i]).EncodeToString()] = ns - } - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.NoError(t, err) - require.Equal(t, srcObj, w.Object()) - - w = NewSimpleObjectWriter() - payloadSz := srcObj.PayloadSize() - - off := payloadSz / 3 - ln := payloadSz / 3 - - rngPrm := newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.NoError(t, err) - require.Equal(t, payload[off:off+ln], w.Object().Payload()) - - w = NewSimpleObjectWriter() - off = payloadSz - 2 - ln = 1 - - rngPrm = newRngPrm(false, w, off, ln) - rngPrm.WithAddress(addr) - - err = svc.GetRange(ctx, rngPrm) - require.NoError(t, err) - require.Equal(t, payload[off:off+ln], w.Object().Payload()) - }) - }) - - t.Run("corrupted source object", func(t *testing.T) { - addr := oidtest.Address() - addr.SetContainer(idCnr) - addr.SetObject(oidtest.ID()) - - ns, as := testNodeMatrix(t, []int{2}) - - splitInfo := objectSDK.NewSplitInfo() - - c1 := newTestClient() - c1.addResult(addr, nil, errors.New("any error")) - - c2 := newTestClient() - c2.addResult(addr, nil, objectSDK.NewSplitInfoError(splitInfo)) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{}, - } - - builder.vectors[addr.EncodeToString()] = ns - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - testHeadVirtual(svc, addr, splitInfo) - - w := NewSimpleObjectWriter() - - p := newPrm(false, w) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.ErrorAs(t, err, new(*objectSDK.SplitInfoError)) - }) - }) -} - -type testTarget struct { - objects []*objectSDK.Object -} - -func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error { - tt.objects = append(tt.objects, obj) - return nil -} - -func objectChain(t *testing.T, cnr cid.ID, singleSize, totalSize uint64) (oid.ID, []*objectSDK.Object, *objectSDK.Object, []byte) { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - tt := new(testTarget) - p := transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: &pk.PrivateKey, - NextTargetInit: func() transformer.ObjectWriter { return tt }, - NetworkState: testEpochReceiver(1), - MaxSize: singleSize, - }) - - payload := make([]byte, totalSize) - _, err = rand.Read(payload) - require.NoError(t, err) - - ver := version.Current() - hdr := objectSDK.New() - hdr.SetContainerID(cnr) - hdr.SetType(objectSDK.TypeRegular) - hdr.SetVersion(&ver) - - ctx := context.Background() - require.NoError(t, p.WriteHeader(ctx, hdr)) - - _, err = p.Write(ctx, payload) - require.NoError(t, err) - - res, err := p.Close(ctx) - require.NoError(t, err) - - if totalSize 
<= singleSize { - // Small object, no linking. - require.Len(t, tt.objects, 1) - return res.SelfID, tt.objects, nil, payload - } - - return *res.ParentID, tt.objects[:len(tt.objects)-1], tt.objects[len(tt.objects)-1], bytes.Clone(payload) -} - -func newRngPrm(raw bool, w ChunkWriter, off, ln uint64) RangePrm { - p := RangePrm{} - p.SetChunkWriter(w) - p.WithRawFlag(raw) - p.common = new(util.CommonPrm) - - r := objectSDK.NewRange() - r.SetOffset(off) - r.SetLength(ln) - - p.SetRange(r) - return p -} - -func TestGetRange(t *testing.T) { - var cnr container.Container - cnr.SetPlacementPolicy(netmaptest.PlacementPolicy()) - - var idCnr cid.ID - container.CalculateID(&idCnr, cnr) - - ns, as := testNodeMatrix(t, []int{2}) - - testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) { - w := NewSimpleObjectWriter() - rngPrm := newRngPrm(false, w, from, to-from) - rngPrm.WithAddress(addr) - - err := svc.GetRange(context.Background(), rngPrm) - require.NoError(t, err) - if from == to { - require.Nil(t, w.Object().Payload()) - } else { - require.Equal(t, payload[from:to], w.Object().Payload()) - } - } - - newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service { - const curEpoch = 13 - - return &Service{ - log: test.NewLogger(t), - localStorage: newTestStorage(), - traverserGenerator: &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: b, - }, - }, - epochSource: testEpochReceiver(curEpoch), - remoteStorageConstructor: c, - keyStore: &testKeyStorage{}, - } - } - - t.Run("small", func(t *testing.T) { - const totalSize = 5 - _, objs, _, payload := objectChain(t, idCnr, totalSize, totalSize) - require.Len(t, objs, 1) - require.Len(t, payload, totalSize) - - obj := objs[0] - addr := object.AddressOf(obj) - builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{addr.EncodeToString(): ns}} - - c1 := newTestClient() - c1.addResult(addr, obj, nil) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c1, - }, - }) - - for from := range totalSize - 1 { - for to := from; to < totalSize; to++ { - t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { - testGetRange(t, svc, addr, uint64(from), uint64(to), payload) - }) - } - } - }) - t.Run("big", func(t *testing.T) { - const totalSize = 9 - id, objs, link, payload := objectChain(t, idCnr, 3, totalSize) // 3 parts - require.Equal(t, totalSize, len(payload)) - - builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{}} - builder.vectors[idCnr.EncodeToString()+"/"+id.EncodeToString()] = ns - builder.vectors[object.AddressOf(link).EncodeToString()] = ns - for i := range objs { - builder.vectors[object.AddressOf(objs[i]).EncodeToString()] = ns - } - - var addr oid.Address - addr.SetContainer(idCnr) - addr.SetObject(id) - - const ( - linkingLast = "splitinfo=last" - linkingChildren = "splitinfo=children" - linkingBoth = "splitinfo=both" - ) - - lastID, _ := objs[len(objs)-1].ID() - linkID, _ := link.ID() - - for _, kind := range []string{linkingLast, linkingChildren, linkingBoth} { - t.Run(kind, func(t *testing.T) { - c1 := newTestClient() - for i := range objs { - c1.addResult(object.AddressOf(objs[i]), objs[i], nil) - } - - c1.addResult(object.AddressOf(link), link, nil) - - si := objectSDK.NewSplitInfo() - switch kind { - case linkingLast: - si.SetLastPart(lastID) - case linkingChildren: - si.SetLink(linkID) - case linkingBoth: - si.SetLastPart(lastID) - si.SetLink(linkID) - } - 
c1.addResult(addr, nil, objectSDK.NewSplitInfoError(si)) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c1, - as[0][1]: c1, - }, - }) - - for from := range totalSize - 1 { - for to := from; to < totalSize; to++ { - t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { - testGetRange(t, svc, addr, uint64(from), uint64(to), payload) - }) - } - } - }) - } - }) -} - -func TestGetFromPastEpoch(t *testing.T) { - ctx := context.Background() - - var cnr container.Container - cnr.SetPlacementPolicy(netmaptest.PlacementPolicy()) - - var idCnr cid.ID - container.CalculateID(&idCnr, cnr) - - addr := oidtest.Address() - addr.SetContainer(idCnr) - - payloadSz := uint64(10) - payload := make([]byte, payloadSz) - _, _ = rand.Read(payload) - - obj := generateObject(addr, nil, payload) - - ns, as := testNodeMatrix(t, []int{2, 2}) - - c11 := newTestClient() - c11.addResult(addr, nil, errors.New("any error")) - - c12 := newTestClient() - c12.addResult(addr, nil, errors.New("any error")) - - c21 := newTestClient() - c21.addResult(addr, nil, errors.New("any error")) - - c22 := newTestClient() - c22.addResult(addr, obj, nil) - - const curEpoch = 13 - - svc := &Service{ - log: test.NewLogger(t), - localStorage: newTestStorage(), - epochSource: testEpochReceiver(curEpoch), - traverserGenerator: &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[:1], - }, - }, - curEpoch - 1: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[1:], - }, - }, - }, - }, - remoteStorageConstructor: &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c11, - as[0][1]: c12, - as[1][0]: c21, - as[1][1]: c22, - }, - }, - keyStore: &testKeyStorage{}, - } - - w := NewSimpleObjectWriter() - - commonPrm := new(util.CommonPrm) - - p := Prm{} - p.SetObjectWriter(w) - p.SetCommonParameters(commonPrm) - p.WithAddress(addr) - - err := svc.Get(ctx, p) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - commonPrm.SetNetmapLookupDepth(1) - - err = svc.Get(ctx, p) - require.NoError(t, err) - require.Equal(t, obj, w.Object()) - - rp := RangePrm{} - rp.SetChunkWriter(w) - commonPrm.SetNetmapLookupDepth(0) - rp.SetCommonParameters(commonPrm) - rp.WithAddress(addr) - - off, ln := payloadSz/3, payloadSz/3 - - r := objectSDK.NewRange() - r.SetOffset(off) - r.SetLength(ln) - - rp.SetRange(r) - - err = svc.GetRange(ctx, rp) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - w = NewSimpleObjectWriter() - rp.SetChunkWriter(w) - commonPrm.SetNetmapLookupDepth(1) - - err = svc.GetRange(ctx, rp) - require.NoError(t, err) - require.Equal(t, payload[off:off+ln], w.Object().Payload()) - - hp := HeadPrm{} - hp.SetHeaderWriter(w) - commonPrm.SetNetmapLookupDepth(0) - hp.SetCommonParameters(commonPrm) - hp.WithAddress(addr) - - err = svc.Head(ctx, hp) - require.True(t, clientSDK.IsErrObjectNotFound(err)) - - w = NewSimpleObjectWriter() - hp.SetHeaderWriter(w) - commonPrm.SetNetmapLookupDepth(1) - - err = svc.Head(ctx, hp) - require.NoError(t, err) - require.Equal(t, obj.CutPayload(), w.Object()) -} diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go deleted file mode 100644 index 83ef54744..000000000 --- a/pkg/services/object/get/getrangeec_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/rand" - "fmt" - "testing" - - coreContainer 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type containerStorage struct { - cnt *container.Container -} - -func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) { - coreCnt := coreContainer.Container{ - Value: *cs.cnt, - } - return &coreCnt, nil -} - -func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { - return nil, nil -} - -func TestGetRangeEC(t *testing.T) { - var dataCount uint32 = 3 - var parityCount uint32 = 1 - cnr := container.Container{} - p := netmap.PlacementPolicy{} - p.SetContainerBackupFactor(1) - x := netmap.ReplicaDescriptor{} - x.SetECDataCount(dataCount) - x.SetECParityCount(parityCount) - p.AddReplicas(x) - cnr.SetPlacementPolicy(p) - - var idCnr cid.ID - container.CalculateID(&idCnr, cnr) - - ns, as := testNodeMatrix(t, []int{4}) - - testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) { - w := NewSimpleObjectWriter() - rngPrm := newRngPrm(false, w, from, to-from) - rngPrm.WithAddress(addr) - - err := svc.GetRange(context.Background(), rngPrm) - require.NoError(t, err) - if from == to { - require.Nil(t, w.Object().Payload()) - } else { - require.Equal(t, payload[from:to], w.Object().Payload()) - } - } - - newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service { - const curEpoch = 13 - - return &Service{ - log: test.NewLogger(t), - localStorage: newTestStorage(), - traverserGenerator: &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: b, - }, - }, - epochSource: testEpochReceiver(curEpoch), - remoteStorageConstructor: c, - keyStore: &testKeyStorage{}, - containerSource: &containerStorage{ - cnt: &cnr, - }, - } - } - const totalSize = 5 - obj, parts := objectECChain(t, &idCnr, &cnr, totalSize, totalSize) - require.Len(t, parts, int(dataCount+parityCount)) - require.Len(t, obj.Payload(), totalSize) - - addr := object.AddressOf(obj) - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - clients := map[string]*testClient{} - for i, part := range parts { - builder.vectors[object.AddressOf(part).EncodeToString()] = ns - - tc := newTestClient() - - ecInfo := objectSDK.NewECInfo() - - chunk := objectSDK.ECChunk{} - chunk.Total = uint32(len(parts)) - chunk.Index = uint32(i) - id, _ := part.ID() - idv2 := refs.ObjectID{} - id.WriteToV2(&idv2) - chunk.ID = idv2 - - ecInfo.AddChunk(chunk) - errECInfo := objectSDK.NewECInfoError(ecInfo) - - tc.addResult(addr, nil, errECInfo) - 
tc.addResult(object.AddressOf(part), part, nil) - - clients[as[0][i]] = tc - } - - svc := newSvc(builder, &testClientCache{ - clients: clients, - }) - - for from := range totalSize - 1 { - for to := from; to < totalSize; to++ { - t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { - testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload()) - }) - } - } -} - -func objectECChain(t *testing.T, cnrId *cid.ID, cnr *container.Container, singleSize, totalSize uint64) (*objectSDK.Object, []*objectSDK.Object) { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - tt := new(testTarget) - p := transformer.NewPayloadSizeLimiter(transformer.Params{ - Key: &pk.PrivateKey, - NextTargetInit: func() transformer.ObjectWriter { return tt }, - NetworkState: testEpochReceiver(1), - MaxSize: singleSize, - }) - - payload := make([]byte, totalSize) - _, err = rand.Read(payload) - require.NoError(t, err) - - ver := version.Current() - hdr := objectSDK.New() - hdr.SetContainerID(*cnrId) - hdr.SetType(objectSDK.TypeRegular) - hdr.SetVersion(&ver) - - ctx := context.Background() - require.NoError(t, p.WriteHeader(ctx, hdr)) - - _, err = p.Write(ctx, payload) - require.NoError(t, err) - - _, err = p.Close(ctx) - require.NoError(t, err) - - require.Len(t, tt.objects, 1) - - c, err := erasurecode.NewConstructor(policy.ECDataCount(cnr.PlacementPolicy()), policy.ECParityCount(cnr.PlacementPolicy())) - require.NoError(t, err) - parts, err := c.Split(tt.objects[0], &pk.PrivateKey) - require.NoError(t, err) - - return tt.objects[0], parts -} diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go deleted file mode 100644 index cfabb082f..000000000 --- a/pkg/services/object/get/local.go +++ /dev/null @@ -1,63 +0,0 @@ -package getsvc - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -func (r *request) executeLocal(ctx context.Context) { - ctx, span := tracing.StartSpanFromContext(ctx, "getService.executeLocal") - defer func() { - span.End() - }() - - var err error - - r.collectedObject, err = r.get(ctx) - - var errSplitInfo *objectSDK.SplitInfoError - var errECInfo *objectSDK.ECInfoError - var errRemoved *apistatus.ObjectAlreadyRemoved - var errOutOfRange *apistatus.ObjectOutOfRange - - switch { - default: - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err)) - case err == nil: - r.status = statusOK - r.err = nil - r.writeCollectedObject(ctx) - case errors.As(err, &errRemoved): - r.status = statusINHUMED - r.err = errRemoved - case errors.As(err, &errSplitInfo): - r.status = statusVIRTUAL - mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo()) - r.err = objectSDK.NewSplitInfoError(r.infoSplit) - case errors.As(err, &errECInfo): - r.status = statusEC - r.err = r.infoEC.addLocal(errECInfo.ECInfo()) - case errors.As(err, &errOutOfRange): - r.status = statusOutOfRange - r.err = errOutOfRange - } -} - -func (r *request) get(ctx context.Context) (*objectSDK.Object, error) { - if r.headOnly() { - return r.localStorage.Head(ctx, r.address(), r.isRaw()) - } - if rng := r.ctxRange(); rng != nil { - return r.localStorage.Range(ctx, r.address(), rng) - } - return r.localStorage.Get(ctx, r.address()) -} diff --git a/pkg/services/object/get/prm.go 
b/pkg/services/object/get/prm.go deleted file mode 100644 index 94c07381c..000000000 --- a/pkg/services/object/get/prm.go +++ /dev/null @@ -1,151 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - "hash" - - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Prm groups parameters of Get service call. -type Prm struct { - commonPrm -} - -// RangePrm groups parameters of GetRange service call. -type RangePrm struct { - commonPrm - - rng *objectSDK.Range -} - -// Validate pre-validates `OBJECTRANGE` request's parameters content -// without access to the requested object's payload. -func (p RangePrm) Validate() error { - if p.rng != nil { - off := p.rng.GetOffset() - l := p.rng.GetLength() - - if l == 0 { - return errRangeZeroLength - } - - if off+l <= off { - return errRangeOverflow - } - } - - return nil -} - -// RangeHashPrm groups parameters of GetRangeHash service call. -type RangeHashPrm struct { - commonPrm - - hashGen func() hash.Hash - - rngs []objectSDK.Range - - salt []byte -} - -type RequestParameters struct { - commonPrm - head bool - rng *objectSDK.Range -} - -type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) (*objectSDK.Object, error) - -// HeadPrm groups parameters of Head service call. -type HeadPrm struct { - commonPrm -} - -type commonPrm struct { - objWriter ObjectWriter - - common *util.CommonPrm - - addr oid.Address - - raw bool - - forwarder RequestForwarder - - // signerKey is a cached key that should be used for spawned - // requests (if any); it may be nil if the incoming request handling - // routine does not perform any key fetching operations - signerKey *ecdsa.PrivateKey -} - -// SetObjectWriter sets target component to write the object. -func (p *Prm) SetObjectWriter(w ObjectWriter) { - p.objWriter = w -} - -// SetChunkWriter sets target component to write the object payload range. -func (p *commonPrm) SetChunkWriter(w ChunkWriter) { - p.objWriter = &partWriter{ - chunkWriter: w, - } -} - -// SetRange sets range of the requested payload data. -func (p *RangePrm) SetRange(rng *objectSDK.Range) { - p.rng = rng -} - -// SetRangeList sets a list of object payload ranges. -func (p *RangeHashPrm) SetRangeList(rngs []objectSDK.Range) { - p.rngs = rngs -} - -// SetHashGenerator sets constructor of hashing algorithm. -func (p *RangeHashPrm) SetHashGenerator(v func() hash.Hash) { - p.hashGen = v -} - -// SetSalt sets binary salt to XOR object's payload ranges before hash calculation. -func (p *RangeHashPrm) SetSalt(salt []byte) { - p.salt = salt -} - -// SetCommonParameters sets common parameters of the operation. -func (p *commonPrm) SetCommonParameters(common *util.CommonPrm) { - p.common = common -} - -func (p *commonPrm) SetRequestForwarder(f RequestForwarder) { - p.forwarder = f -} - -func (p *commonPrm) SetSignerKey(signerKey *ecdsa.PrivateKey) { - p.signerKey = signerKey -} - -// WithAddress sets object address to be read. -func (p *commonPrm) WithAddress(addr oid.Address) { - p.addr = addr -} - -// WithRawFlag sets flag of raw reading. -func (p *commonPrm) WithRawFlag(raw bool) { - p.raw = raw -} - -// WithCachedSignerKey sets optional key for all further requests.
-func (p *commonPrm) WithCachedSignerKey(signerKey *ecdsa.PrivateKey) { - p.signerKey = signerKey -} - -// SetHeaderWriter sets target component to write the object header. -func (p *commonPrm) SetHeaderWriter(w HeaderWriter) { - p.objWriter = &partWriter{ - headWriter: w, - } -} diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go deleted file mode 100644 index 78ca5b5e3..000000000 --- a/pkg/services/object/get/remote.go +++ /dev/null @@ -1,163 +0,0 @@ -package getsvc - -import ( - "context" - "encoding/hex" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode") - defer span.End() - - r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) - - rs, ok := r.getRemoteStorage(ctx, info) - if !ok { - return true - } - - obj, err := r.getRemote(ctx, rs, info) - - var errSplitInfo *objectSDK.SplitInfoError - var errECInfo *objectSDK.ECInfoError - var errRemoved *apistatus.ObjectAlreadyRemoved - var errOutOfRange *apistatus.ObjectOutOfRange - var errAccessDenied *apistatus.ObjectAccessDenied - - switch { - default: - r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err)) - if r.status != statusEC { - // for raw requests, continue to collect other parts - r.status = statusUndefined - if errors.As(err, &errAccessDenied) { - r.err = err - } else if r.err == nil || !errors.As(r.err, &errAccessDenied) { - r.err = new(apistatus.ObjectNotFound) - } - } - return false - case err == nil: - r.status = statusOK - r.err = nil - - // both object and err are nil only if the original - // request was forwarded to another node and the object - // has already been streamed to the requesting party - if obj != nil { - r.collectedObject = obj - r.writeCollectedObject(ctx) - } - return true - case errors.As(err, &errRemoved): - r.status = statusINHUMED - r.err = errRemoved - return true - case errors.As(err, &errOutOfRange): - r.status = statusOutOfRange - r.err = errOutOfRange - return true - case errors.As(err, &errSplitInfo): - r.status = statusVIRTUAL - mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo()) - r.err = objectSDK.NewSplitInfoError(r.infoSplit) - return true - case errors.As(err, &errECInfo): - r.status = statusEC - r.err = r.infoEC.addRemote(string(info.PublicKey()), errECInfo.ECInfo()) - if r.isRaw() { - return false // continue to collect all parts - } - return true - } -} - -func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) { - if r.isForwardingEnabled() { - return rs.ForwardRequest(ctx, info, r.prm.forwarder) - } - - key, err := r.key() - if err != nil { - return nil, err - } - - prm := RemoteRequestParams{ - Epoch: r.curProcEpoch, - TTL: r.prm.common.TTL(), - PrivateKey: key, - SessionToken: r.prm.common.SessionToken(), - BearerToken: r.prm.common.BearerToken(), - XHeaders: r.prm.common.XHeaders(), - IsRaw: r.isRaw(), - } - - if r.headOnly() { - return rs.Head(ctx, r.address(), prm) - } - // we don't specify payload writer because we 
accumulate - // the object locally (even huge). - if rng := r.ctxRange(); rng != nil { - // The current spec allows another storage node to deny access; - // fall back to GET here. - return rs.Range(ctx, r.address(), rng, prm) - } - - return rs.Get(ctx, r.address(), prm) -} - -func (r *request) getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error) { - rs, err := r.remoteStorageConstructor.Get(info) - if err != nil { - return nil, err - } - - key, err := r.key() - if err != nil { - return nil, err - } - - prm := RemoteRequestParams{ - Epoch: r.curProcEpoch, - TTL: 1, - PrivateKey: key, - SessionToken: r.prm.common.SessionToken(), - BearerToken: r.prm.common.BearerToken(), - XHeaders: r.prm.common.XHeaders(), - } - - return rs.Get(ctx, addr, prm) -} - -func (r *request) headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error) { - rs, err := r.remoteStorageConstructor.Get(info) - if err != nil { - return nil, err - } - - key, err := r.key() - if err != nil { - return nil, err - } - - prm := RemoteRequestParams{ - Epoch: r.curProcEpoch, - TTL: 1, - PrivateKey: key, - SessionToken: r.prm.common.SessionToken(), - BearerToken: r.prm.common.BearerToken(), - XHeaders: r.prm.common.XHeaders(), - IsRaw: raw, - } - - return rs.Head(ctx, addr, prm) -} diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go deleted file mode 100644 index 2c64244cf..000000000 --- a/pkg/services/object/get/remote_getter.go +++ /dev/null @@ -1,55 +0,0 @@ -package getsvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type RemoteGetPrm struct { - Address oid.Address - Node netmapSDK.NodeInfo -} - -type RemoteGetter struct { - s remoteStorageConstructor - es epochSource - ks keyStorage -} - -func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Object, error) { - var nodeInfo client.NodeInfo - if err := client.NodeInfoFromRawNetmapElement(&nodeInfo, netmapCore.Node(prm.Node)); err != nil { - return nil, err - } - rs, err := g.s.Get(nodeInfo) - if err != nil { - return nil, err - } - epoch, err := g.es.Epoch(ctx) - if err != nil { - return nil, err - } - key, err := g.ks.GetKey(nil) - if err != nil { - return nil, err - } - r := RemoteRequestParams{ - Epoch: epoch, - TTL: 1, - PrivateKey: key, - } - return rs.Get(ctx, prm.Address, r) -} - -func NewRemoteGetter(cc clientConstructor, es epochSource, ks keyStorage) *RemoteGetter { - return &RemoteGetter{ - s: &multiclientRemoteStorageConstructor{clientConstructor: cc}, - es: es, - ks: ks, - } -} diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go deleted file mode 100644 index 268080486..000000000 --- a/pkg/services/object/get/request.go +++ /dev/null @@ -1,248 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" -
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type request struct { - prm RequestParameters - - statusError - - infoSplit *objectSDK.SplitInfo - - infoEC *ecInfo - - log *logger.Logger - - collectedObject *objectSDK.Object - - curProcEpoch uint64 - - keyStore keyStorage - epochSource epochSource - traverserGenerator traverserGenerator - remoteStorageConstructor remoteStorageConstructor - localStorage localStorage - containerSource container.Source -} - -func (r *request) setLogger(l *logger.Logger) { - req := "GET" - if r.headOnly() { - req = "HEAD" - } else if r.ctxRange() != nil { - req = "GET_RANGE" - } - - r.log = l.With( - zap.String("request", req), - zap.Stringer("address", r.address()), - zap.Bool("raw", r.isRaw()), - zap.Bool("local", r.isLocal()), - zap.Bool("with session", r.prm.common.SessionToken() != nil), - zap.Bool("with bearer", r.prm.common.BearerToken() != nil), - ) -} - -func (r *request) isLocal() bool { - return r.prm.common.LocalOnly() -} - -func (r *request) isRaw() bool { - return r.prm.raw -} - -func (r *request) address() oid.Address { - return r.prm.addr -} - -func (r *request) key() (*ecdsa.PrivateKey, error) { - if r.prm.signerKey != nil { - // the key has already been requested and - // cached in the previous operations - return r.prm.signerKey, nil - } - - var sessionInfo *util.SessionInfo - - if tok := r.prm.common.SessionToken(); tok != nil { - sessionInfo = &util.SessionInfo{ - ID: tok.ID(), - Owner: tok.Issuer(), - } - } - - return r.keyStore.GetKey(sessionInfo) -} - -func (r *request) canAssembleComplexObject() bool { - return !r.isRaw() -} - -func (r *request) splitInfo() *objectSDK.SplitInfo { - return r.infoSplit -} - -func (r *request) containerID() cid.ID { - return r.address().Container() -} - -func (r *request) ctxRange() *objectSDK.Range { - return r.prm.rng -} - -func (r *request) headOnly() bool { - return r.prm.head -} - -func (r *request) netmapEpoch() uint64 { - return r.prm.common.NetmapEpoch() -} - -func (r *request) netmapLookupDepth() uint64 { - return r.prm.common.NetmapLookupDepth() -} - -func (r *request) initEpoch(ctx context.Context) bool { - r.curProcEpoch = r.netmapEpoch() - if r.curProcEpoch > 0 { - return true - } - - e, err := r.epochSource.Epoch(ctx) - - switch { - default: - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) - - return false - case err == nil: - r.curProcEpoch = e - return true - } -} - -func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { - obj := addr.Object() - - t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch) - - switch { - default: - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) - - return nil, false - case err == nil: - return t, true - } -} - -func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) { - rs, err := r.remoteStorageConstructor.Get(info) - if err != nil { - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient) - - return nil, false - } - - 
return rs, true -} - -func (r *request) writeCollectedHeader(ctx context.Context) bool { - if r.ctxRange() != nil { - return true - } - - err := r.prm.objWriter.WriteHeader( - ctx, - r.collectedObject.CutPayload(), - ) - - switch { - default: - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err)) - case err == nil: - r.status = statusOK - r.err = nil - } - - return r.status == statusOK -} - -func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) bool { - if r.headOnly() { - return true - } - - err := r.prm.objWriter.WriteChunk(ctx, obj.Payload()) - - switch { - default: - r.status = statusUndefined - r.err = err - - r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err)) - case err == nil: - r.status = statusOK - r.err = nil - } - - return err == nil -} - -func (r *request) writeCollectedObject(ctx context.Context) { - if ok := r.writeCollectedHeader(ctx); ok { - r.writeObjectPayload(ctx, r.collectedObject) - } -} - -// isForwardingEnabled returns true if common execution -// parameters has request forwarding closure set. -func (r request) isForwardingEnabled() bool { - return r.prm.forwarder != nil -} - -// disableForwarding removes request forwarding closure from common -// parameters, so it won't be inherited in new execution contexts. -func (r *request) disableForwarding() { - r.prm.SetRequestForwarder(nil) -} - -func mergeSplitInfo(dst, src *objectSDK.SplitInfo) { - if last, ok := src.LastPart(); ok { - dst.SetLastPart(last) - } - - if link, ok := src.Link(); ok { - dst.SetLink(link) - } - - if splitID := src.SplitID(); splitID != nil { - dst.SetSplitID(splitID) - } -} diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go deleted file mode 100644 index a103f5a7f..000000000 --- a/pkg/services/object/get/service.go +++ /dev/null @@ -1,58 +0,0 @@ -package getsvc - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option is a Service's constructor option. -type Option func(*Service) - -// Service utility serving requests of Object.Get service. -type Service struct { - log *logger.Logger - localStorage localStorage - traverserGenerator traverserGenerator - epochSource epochSource - keyStore keyStorage - remoteStorageConstructor remoteStorageConstructor - containerSource container.Source -} - -// New creates, initializes and returns utility serving -// Object.Get service requests. -func New( - ks keyStorage, - es epochSource, - e localStorageEngine, - tg traverserGenerator, - cc clientConstructor, - cs container.Source, - opts ...Option, -) *Service { - result := &Service{ - keyStore: ks, - epochSource: es, - log: logger.NewLoggerWrapper(zap.L()), - localStorage: &engineLocalStorage{ - engine: e, - }, - traverserGenerator: tg, - remoteStorageConstructor: &multiclientRemoteStorageConstructor{ - clientConstructor: cc, - }, - containerSource: cs, - } - for _, option := range opts { - option(result) - } - return result -} - -// WithLogger returns option to specify Get service's logger. 
-func WithLogger(l *logger.Logger) Option { - return func(s *Service) { - s.log = l - } -} diff --git a/pkg/services/object/get/status.go b/pkg/services/object/get/status.go deleted file mode 100644 index 919338d7f..000000000 --- a/pkg/services/object/get/status.go +++ /dev/null @@ -1,15 +0,0 @@ -package getsvc - -const ( - statusUndefined int = iota - statusOK - statusINHUMED - statusVIRTUAL - statusOutOfRange - statusEC -) - -type statusError struct { - status int - err error -} diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go deleted file mode 100644 index 664366d1b..000000000 --- a/pkg/services/object/get/types.go +++ /dev/null @@ -1,287 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - "errors" - - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -type epochSource interface { - Epoch(ctx context.Context) (uint64, error) -} - -type traverserGenerator interface { - GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) -} - -type keyStorage interface { - GetKey(info *util.SessionInfo) (*ecdsa.PrivateKey, error) -} - -type localStorageEngine interface { - Head(ctx context.Context, p engine.HeadPrm) (engine.HeadRes, error) - GetRange(ctx context.Context, p engine.RngPrm) (engine.RngRes, error) - Get(ctx context.Context, p engine.GetPrm) (engine.GetRes, error) -} - -type clientConstructor interface { - Get(coreclient.NodeInfo) (coreclient.MultiAddressClient, error) -} - -type remoteStorageConstructor interface { - Get(coreclient.NodeInfo) (remoteStorage, error) -} - -type multiclientRemoteStorageConstructor struct { - clientConstructor clientConstructor -} - -func (c *multiclientRemoteStorageConstructor) Get(info coreclient.NodeInfo) (remoteStorage, error) { - clt, err := c.clientConstructor.Get(info) - if err != nil { - return nil, err - } - - return &multiaddressRemoteStorage{ - client: clt, - }, nil -} - -type localStorage interface { - Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) - Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) - Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) -} - -type engineLocalStorage struct { - engine localStorageEngine -} - -func (s *engineLocalStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) { - var headPrm engine.HeadPrm - headPrm.WithAddress(address) - headPrm.WithRaw(isRaw) - - r, err := s.engine.Head(ctx, headPrm) - if err != nil { - return nil, err - } - - return r.Header(), nil -} - -func (s *engineLocalStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) 
(*objectSDK.Object, error) { - var getRange engine.RngPrm - getRange.WithAddress(address) - getRange.WithPayloadRange(rng) - - r, err := s.engine.GetRange(ctx, getRange) - if err != nil { - return nil, err - } - - return r.Object(), nil -} - -func (s *engineLocalStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) { - var getPrm engine.GetPrm - getPrm.WithAddress(address) - - r, err := s.engine.Get(ctx, getPrm) - if err != nil { - return nil, err - } - - return r.Object(), nil -} - -type RemoteRequestParams struct { - Epoch uint64 - TTL uint32 - PrivateKey *ecdsa.PrivateKey - SessionToken *session.Object - BearerToken *bearer.Token - XHeaders []string - IsRaw bool -} - -type remoteStorage interface { - Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) - Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) - Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) - - ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) -} - -type multiaddressRemoteStorage struct { - client coreclient.MultiAddressClient -} - -func (s *multiaddressRemoteStorage) ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) { - return forwarder(ctx, info, s.client) -} - -func (s *multiaddressRemoteStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - var prm internalclient.PayloadRangePrm - - prm.SetClient(s.client) - prm.SetTTL(requestParams.TTL) - prm.SetNetmapEpoch(requestParams.Epoch) - prm.SetAddress(address) - prm.SetPrivateKey(requestParams.PrivateKey) - prm.SetSessionToken(requestParams.SessionToken) - prm.SetBearerToken(requestParams.BearerToken) - prm.SetXHeaders(requestParams.XHeaders) - prm.SetRange(rng) - if requestParams.IsRaw { - prm.SetRawFlag() - } - - res, err := internalclient.PayloadRange(ctx, prm) - if err != nil { - var errAccessDenied *apistatus.ObjectAccessDenied - if errors.As(err, &errAccessDenied) { - obj, err := s.Get(ctx, address, requestParams) - if err != nil { - return nil, err - } - - payload := obj.Payload() - from := rng.GetOffset() - to := from + rng.GetLength() - - if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { - return nil, new(apistatus.ObjectOutOfRange) - } - - return s.payloadOnlyObject(payload[from:to]), nil - } - return nil, err - } - - return s.payloadOnlyObject(res.PayloadRange()), nil -} - -func (s *multiaddressRemoteStorage) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - var prm internalclient.HeadObjectPrm - - prm.SetClient(s.client) - prm.SetTTL(requestParams.TTL) - prm.SetNetmapEpoch(requestParams.Epoch) - prm.SetAddress(address) - prm.SetPrivateKey(requestParams.PrivateKey) - prm.SetSessionToken(requestParams.SessionToken) - prm.SetBearerToken(requestParams.BearerToken) - prm.SetXHeaders(requestParams.XHeaders) - - if requestParams.IsRaw { - prm.SetRawFlag() - } - - res, err := internalclient.HeadObject(ctx, prm) - if err != nil { - return nil, err - } - - return res.Header(), nil -} - -func (s *multiaddressRemoteStorage) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { - var prm 
internalclient.GetObjectPrm - - prm.SetClient(s.client) - prm.SetTTL(requestParams.TTL) - prm.SetNetmapEpoch(requestParams.Epoch) - prm.SetAddress(address) - prm.SetPrivateKey(requestParams.PrivateKey) - prm.SetSessionToken(requestParams.SessionToken) - prm.SetBearerToken(requestParams.BearerToken) - prm.SetXHeaders(requestParams.XHeaders) - - if requestParams.IsRaw { - prm.SetRawFlag() - } - - res, err := internalclient.GetObject(ctx, prm) - if err != nil { - return nil, err - } - - return res.Object(), nil -} - -func (s *multiaddressRemoteStorage) payloadOnlyObject(payload []byte) *objectSDK.Object { - obj := objectSDK.New() - obj.SetPayload(payload) - - return obj -} - -type RangeHashRes struct { - hashes [][]byte -} - -func (r *RangeHashRes) Hashes() [][]byte { - return r.hashes -} - -type ecInfo struct { - localChunks []objectSDK.ECChunk - remoteChunks map[string][]objectSDK.ECChunk // node pk -> chunk slice -} - -func newECInfo() *ecInfo { - return &ecInfo{ - localChunks: make([]objectSDK.ECChunk, 0), - remoteChunks: make(map[string][]objectSDK.ECChunk), - } -} - -func (e *ecInfo) addLocal(ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError { - for _, ch := range ecInfo.Chunks { - e.localChunks = append(e.localChunks, objectSDK.ECChunk(ch)) - } - return e.createECInfoErr() -} - -func (e *ecInfo) addRemote(nodePK string, ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError { - for _, ch := range ecInfo.Chunks { - e.remoteChunks[nodePK] = append(e.remoteChunks[nodePK], objectSDK.ECChunk(ch)) - } - return e.createECInfoErr() -} - -func (e *ecInfo) createECInfoErr() *objectSDK.ECInfoError { - unique := make(map[string]struct{}) - result := objectSDK.NewECInfo() - for _, ch := range e.localChunks { - if _, found := unique[string(ch.ID.GetValue())]; found { - continue - } - result.AddChunk(ch) - unique[string(ch.ID.GetValue())] = struct{}{} - } - for _, chunks := range e.remoteChunks { - for _, ch := range chunks { - if _, found := unique[string(ch.ID.GetValue())]; found { - continue - } - result.AddChunk(ch) - unique[string(ch.ID.GetValue())] = struct{}{} - } - } - return objectSDK.NewECInfoError(result) -} diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go deleted file mode 100644 index aaa09b891..000000000 --- a/pkg/services/object/get/v2/errors.go +++ /dev/null @@ -1,88 +0,0 @@ -package getsvc - -import ( - "errors" - "fmt" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" -) - -var ( - errMissingObjAddress = errors.New("missing object address") - errWrongMessageSeq = errors.New("incorrect message sequence") - errNilObjectPart = errors.New("nil object part") - errMissingSignature = errors.New("missing signature") - errInvalidObjectIDSign = errors.New("invalid object ID signature") - - errWrongHeaderPartTypeExpShortRecvWithSignature = fmt.Errorf("wrong header part type: expected %T, received %T", - (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil), - ) - errWrongHeaderPartTypeExpWithSignRecvShort = fmt.Errorf("wrong header part type: expected %T, received %T", - (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil), - ) -) - -func errInvalidObjAddress(err error) error { - return fmt.Errorf("invalid object address: %w", err) -} - -func errRequestParamsValidation(err error) error { - return fmt.Errorf("request params validation: %w", err) -} - -func errFetchingSessionKey(err error) error { - return fmt.Errorf("fetching session key: %w", err) -} - 
-func errUnknownChechsumType(t refs.ChecksumType) error { - return fmt.Errorf("unknown checksum type %v", t) -} - -func errResponseVerificationFailed(err error) error { - return fmt.Errorf("response verification failed: %w", err) -} - -func errCouldNotWriteObjHeader(err error) error { - return fmt.Errorf("could not write object header in Get forwarder: %w", err) -} - -func errStreamOpenningFailed(err error) error { - return fmt.Errorf("stream opening failed: %w", err) -} - -func errReadingResponseFailed(err error) error { - return fmt.Errorf("reading the response failed: %w", err) -} - -func errUnexpectedObjectPart(v objectV2.GetObjectPart) error { - return fmt.Errorf("unexpected object part %T", v) -} - -func errCouldNotWriteObjChunk(forwarder string, err error) error { - return fmt.Errorf("could not write object chunk in %s forwarder: %w", forwarder, err) -} - -func errCouldNotCreateGetRangeStream(err error) error { - return fmt.Errorf("could not create Get payload range stream: %w", err) -} - -func errUnexpectedRangePart(v objectV2.GetRangePart) error { - return fmt.Errorf("unexpected range type %T", v) -} - -func errUnexpectedHeaderPart(v objectV2.GetHeaderPart) error { - return fmt.Errorf("unexpected header type %T", v) -} - -func errMarshalID(err error) error { - return fmt.Errorf("marshal ID: %w", err) -} - -func errCantReadSignature(err error) error { - return fmt.Errorf("can't read signature: %w", err) -} - -func errSendingRequestFailed(err error) error { - return fmt.Errorf("sending the request failed: %w", err) -} diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go deleted file mode 100644 index 60fcd7fbf..000000000 --- a/pkg/services/object/get/v2/get_forwarder.go +++ /dev/null @@ -1,179 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - "errors" - "io" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type getRequestForwarder struct { - OnceResign sync.Once - GlobalProgress int - Key *ecdsa.PrivateKey - Request *objectV2.GetRequest - Stream *streamObjectWriter - - headerSent bool - headerSentGuard sync.Mutex -} - -func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "getRequestForwarder.forwardRequestToNode", - trace.WithAttributes(attribute.String("address", addr.String())), - ) - defer span.End() - - var err error - - // once compose and resign forwarding request - f.OnceResign.Do(func() { - // compose meta header of the local server - metaHdr := new(session.RequestMetaHeader) - metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1) - // TODO: #1165 think how to set the 
other fields - metaHdr.SetOrigin(f.Request.GetMetaHeader()) - writeCurrentVersion(metaHdr) - f.Request.SetMetaHeader(metaHdr) - err = signature.SignServiceMessage(f.Key, f.Request) - }) - - if err != nil { - return nil, err - } - - getStream, err := f.openStream(ctx, addr, c) - if err != nil { - return nil, err - } - return nil, f.readStream(ctx, c, getStream, pubkey) -} - -func (f *getRequestForwarder) verifyResponse(resp *objectV2.GetResponse, pubkey []byte) error { - // verify response key - if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil { - return err - } - - // verify response structure - if err := signature.VerifyServiceMessage(resp); err != nil { - return errResponseVerificationFailed(err) - } - - return checkStatus(resp.GetMetaHeader().GetStatus()) -} - -func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetObjectPartInit) error { - obj := new(objectV2.Object) - - obj.SetObjectID(v.GetObjectID()) - obj.SetSignature(v.GetSignature()) - obj.SetHeader(v.GetHeader()) - - f.headerSentGuard.Lock() - defer f.headerSentGuard.Unlock() - if f.headerSent { - return nil - } - if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil { - return errCouldNotWriteObjHeader(err) - } - f.headerSent = true - return nil -} - -func (f *getRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.GetResponseReader, error) { - var getStream *rpc.GetResponseReader - err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { - var e error - getStream, e = rpc.GetObject(cli, f.Request, rpcclient.WithContext(ctx)) - return e - }) - if err != nil { - return nil, errStreamOpenningFailed(err) - } - return getStream, nil -} - -func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddressClient, getStream *rpc.GetResponseReader, pubkey []byte) error { - var ( - headWas bool - resp = new(objectV2.GetResponse) - localProgress int - ) - - for { - // receive message from server stream - err := getStream.Read(resp) - if err != nil { - if errors.Is(err, io.EOF) { - if !headWas { - return io.ErrUnexpectedEOF - } - - break - } - - internalclient.ReportError(c, err) - return errReadingResponseFailed(err) - } - - if err := f.verifyResponse(resp, pubkey); err != nil { - return err - } - - switch v := resp.GetBody().GetObjectPart().(type) { - default: - return errUnexpectedObjectPart(v) - case *objectV2.GetObjectPartInit: - if headWas { - return errWrongMessageSeq - } - headWas = true - if err := f.writeHeader(ctx, v); err != nil { - return err - } - case *objectV2.GetObjectPartChunk: - if !headWas { - return errWrongMessageSeq - } - - origChunk := v.GetChunk() - - chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk) - if len(chunk) == 0 { - localProgress += len(origChunk) - continue - } - - if err = f.Stream.WriteChunk(ctx, chunk); err != nil { - return errCouldNotWriteObjChunk("Get", err) - } - - localProgress += len(origChunk) - f.GlobalProgress += len(chunk) - case *objectV2.SplitInfo: - si := objectSDK.NewSplitInfoFromV2(v) - return objectSDK.NewSplitInfoError(si) - case *objectV2.ECInfo: - ei := objectSDK.NewECInfoFromV2(v) - return objectSDK.NewECInfoError(ei) - } - } - return nil -} diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go deleted file mode 100644 index a44616fc9..000000000 --- a/pkg/services/object/get/v2/get_range_forwarder.go +++ /dev/null @@ -1,126 +0,0 @@ -package getsvc - -import ( - 
"context" - "crypto/ecdsa" - "errors" - "io" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type getRangeRequestForwarder struct { - OnceResign sync.Once - GlobalProgress int - Key *ecdsa.PrivateKey - Request *objectV2.GetRangeRequest - Stream *streamObjectRangeWriter -} - -func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "getRangeRequestForwarder.forwardRequestToNode", - trace.WithAttributes(attribute.String("address", addr.String())), - ) - defer span.End() - - var err error - - // once compose and resign forwarding request - f.OnceResign.Do(func() { - // compose meta header of the local server - metaHdr := new(session.RequestMetaHeader) - metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1) - // TODO: #1165 think how to set the other fields - metaHdr.SetOrigin(f.Request.GetMetaHeader()) - writeCurrentVersion(metaHdr) - - f.Request.SetMetaHeader(metaHdr) - - err = signature.SignServiceMessage(f.Key, f.Request) - }) - - if err != nil { - return nil, err - } - - rangeStream, err := f.openStream(ctx, addr, c) - if err != nil { - return nil, err - } - - return nil, f.readStream(ctx, rangeStream, c, pubkey) -} - -func (f *getRangeRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.ObjectRangeResponseReader, error) { - // open stream - var rangeStream *rpc.ObjectRangeResponseReader - err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { - var e error - rangeStream, e = rpc.GetObjectRange(cli, f.Request, rpcclient.WithContext(ctx)) - return e - }) - if err != nil { - return nil, errCouldNotCreateGetRangeStream(err) - } - return rangeStream, nil -} - -func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream *rpc.ObjectRangeResponseReader, c client.MultiAddressClient, pubkey []byte) error { - resp := new(objectV2.GetRangeResponse) - var localProgress int - - for { - // receive message from server stream - err := rangeStream.Read(resp) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - internalclient.ReportError(c, err) - return errReadingResponseFailed(err) - } - - if err := verifyResponse(resp, pubkey); err != nil { - return err - } - - switch v := resp.GetBody().GetRangePart().(type) { - case nil: - return errUnexpectedRangePart(v) - case *objectV2.GetRangePartChunk: - origChunk := v.GetChunk() - - chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk) - if len(chunk) == 0 { - localProgress += len(origChunk) - continue - } - - if err = f.Stream.WriteChunk(ctx, chunk); err != nil { - return errCouldNotWriteObjChunk("GetRange", err) - } - - localProgress += len(origChunk) - f.GlobalProgress += len(chunk) - 
case *objectV2.SplitInfo: - si := objectSDK.NewSplitInfoFromV2(v) - return objectSDK.NewSplitInfoError(si) - case *objectV2.ECInfo: - ei := objectSDK.NewECInfoFromV2(v) - return objectSDK.NewECInfoError(ei) - } - } - return nil -} diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go deleted file mode 100644 index 308ccd512..000000000 --- a/pkg/services/object/get/v2/get_range_hash.go +++ /dev/null @@ -1,218 +0,0 @@ -package getsvc - -import ( - "context" - "encoding/hex" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -// GetRangeHash calls internal service and returns v2 response. -func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - forward, err := s.needToForwardGetRangeHashRequest(ctx, req) - if err != nil { - return nil, err - } - if forward.needToForward { - return s.forwardGetRangeHashRequest(ctx, req, forward) - } - p, err := s.toHashRangePrm(req) - if err != nil { - return nil, err - } - - res, err := s.svc.GetRangeHash(ctx, *p) - if err != nil { - return nil, err - } - - return toHashResponse(req.GetBody().GetType(), res), nil -} - -type getRangeForwardParams struct { - needToForward bool - containerNodes []netmapSDK.NodeInfo - address oid.Address -} - -func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { - if req.GetMetaHeader().GetTTL() <= 1 { - return getRangeForwardParams{}, nil - } - - var result getRangeForwardParams - addrV2 := req.GetBody().GetAddress() - if addrV2 == nil { - return result, errMissingObjAddress - } - - var addr oid.Address - err := addr.ReadFromV2(*addrV2) - if err != nil { - return result, errInvalidObjAddress(err) - } - result.address = addr - - cont, err := s.contSource.Get(ctx, addr.Container()) - if err != nil { - return result, fmt.Errorf("(%T) could not get container: %w", s, err) - } - - epoch, err := s.netmapSource.Epoch(ctx) - if err != nil { - return result, fmt.Errorf("(%T) could not get epoch: %w", s, err) - } - - nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return result, fmt.Errorf("(%T) could not get netmap: %w", s, err) - } - - builder := placement.NewNetworkMapBuilder(nm) - - objectID := addr.Object() - nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy()) - if err != nil { - return result, fmt.Errorf("(%T) could not build object placement: %w", s, err) - } - result.containerNodes = distinctBy(placement.FlattenNodes(nodesVector), func(n netmapSDK.NodeInfo) string { return hex.EncodeToString(n.PublicKey()) }) - - for _, node := range result.containerNodes { - if 
s.announcedKeys.IsLocalKey(node.PublicKey()) { - return result, nil - } - } - result.needToForward = true - return result, nil -} - -func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest, params getRangeForwardParams) (*objectV2.GetRangeHashResponse, error) { - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - metaHdr := new(session.RequestMetaHeader) - metaHdr.SetTTL(req.GetMetaHeader().GetTTL() - 1) - metaHdr.SetOrigin(req.GetMetaHeader()) - writeCurrentVersion(metaHdr) - req.SetMetaHeader(metaHdr) - - if err := signature.SignServiceMessage(key, req); err != nil { - return nil, err - } - - var firstErr error - for _, node := range params.containerNodes { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - var addrGr network.AddressGroup - if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil { - s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) - continue - } - - var extAddr network.AddressGroup - if len(node.ExternalAddresses()) > 0 { - if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil { - s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) - continue - } - } - - var info clientCore.NodeInfo - clientCore.NodeInfoFromNetmapElement(&info, placement.NewNode(addrGr, extAddr, node.PublicKey())) - - resp, err := s.performGetRangeHashOnNode(ctx, req, info) - if err == nil { - if err := verifyResponse(resp, info.PublicKey()); err != nil { - return nil, err - } - return resp, nil - } - if firstErr == nil { - firstErr = err - } - s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode, - zap.String("node_public_key", hex.EncodeToString(node.PublicKey())), - zap.Stringer("address", params.address), - zap.Error(err)) - } - s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr)) - if firstErr != nil { - return nil, firstErr - } - return nil, new(apistatus.ObjectNotFound) -} - -func (s *Service) performGetRangeHashOnNode(ctx context.Context, req *objectV2.GetRangeHashRequest, info clientCore.NodeInfo) (*objectV2.GetRangeHashResponse, error) { - cl, err := s.clientSource.Get(info) - if err != nil { - return nil, err - } - - var firstErr error - var resp *objectV2.GetRangeHashResponse - info.AddressGroup().IterateAddresses(func(a network.Address) bool { - resp, err = s.performGetRangeHashOnAddress(ctx, req, cl, a) - if err != nil { - if firstErr == nil { - firstErr = err - } - return false - } - return true - }) - if firstErr != nil { - return nil, firstErr - } - if resp == nil { - return nil, new(apistatus.ObjectNotFound) - } - return resp, nil -} - -func (s *Service) performGetRangeHashOnAddress(ctx context.Context, req *objectV2.GetRangeHashRequest, cl clientCore.MultiAddressClient, - a network.Address, -) (*objectV2.GetRangeHashResponse, error) { - var resp *objectV2.GetRangeHashResponse - var rpcErr error - err := cl.RawForAddress(ctx, a, func(cli *rpcclient.Client) error { - resp, rpcErr = rpc.HashObjectRange(cli, req, rpcclient.WithContext(ctx)) - return rpcErr - }) - if err != nil { - return nil, err - } - return resp, err -} - -func distinctBy[T any, K comparable](source []T, keySelector func(v T) K) []T { - var result []T - dict := make(map[K]struct{}) - for _, v := range source { - key := keySelector(v) - if 
_, exists := dict[key]; !exists { - result = append(result, v) - dict[key] = struct{}{} - } - } - return result -} diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go deleted file mode 100644 index 56056398d..000000000 --- a/pkg/services/object/get/v2/head_forwarder.go +++ /dev/null @@ -1,160 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type headRequestForwarder struct { - Request *objectV2.HeadRequest - OnceResign sync.Once - ObjectAddr oid.Address - Key *ecdsa.PrivateKey -} - -func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "headRequestForwarder.forwardRequestToNode", - trace.WithAttributes(attribute.String("address", addr.String())), - ) - defer span.End() - - var err error - - // once compose and resign forwarding request - f.OnceResign.Do(func() { - // compose meta header of the local server - metaHdr := new(session.RequestMetaHeader) - metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1) - // TODO: #1165 think how to set the other fields - metaHdr.SetOrigin(f.Request.GetMetaHeader()) - writeCurrentVersion(metaHdr) - - f.Request.SetMetaHeader(metaHdr) - - err = signature.SignServiceMessage(f.Key, f.Request) - }) - - if err != nil { - return nil, err - } - - headResp, err := f.sendHeadRequest(ctx, addr, c) - if err != nil { - return nil, err - } - - if err := verifyResponse(headResp, pubkey); err != nil { - return nil, err - } - - var ( - hdr *objectV2.Header - idSig *refs.Signature - ) - - switch v := headResp.GetBody().GetHeaderPart().(type) { - case nil: - return nil, errUnexpectedHeaderPart(v) - case *objectV2.ShortHeader: - if hdr, err = f.getHeaderFromShortHeader(v); err != nil { - return nil, err - } - case *objectV2.HeaderWithSignature: - if hdr, idSig, err = f.getHeaderAndSignature(v); err != nil { - return nil, err - } - case *objectV2.SplitInfo: - si := objectSDK.NewSplitInfoFromV2(v) - return nil, objectSDK.NewSplitInfoError(si) - case *objectV2.ECInfo: - ei := objectSDK.NewECInfoFromV2(v) - return nil, objectSDK.NewECInfoError(ei) - } - - objv2 := new(objectV2.Object) - objv2.SetHeader(hdr) - objv2.SetSignature(idSig) - - obj := objectSDK.NewFromV2(objv2) - obj.SetID(f.ObjectAddr.Object()) - - return obj, nil -} - -func (f *headRequestForwarder) getHeaderFromShortHeader(sh *objectV2.ShortHeader) (*objectV2.Header, error) { - if !f.Request.GetBody().GetMainOnly() { - return nil, errWrongHeaderPartTypeExpShortRecvWithSignature - } - - hdr := new(objectV2.Header) - 
hdr.SetPayloadLength(sh.GetPayloadLength()) - hdr.SetVersion(sh.GetVersion()) - hdr.SetOwnerID(sh.GetOwnerID()) - hdr.SetObjectType(sh.GetObjectType()) - hdr.SetCreationEpoch(sh.GetCreationEpoch()) - hdr.SetPayloadHash(sh.GetPayloadHash()) - hdr.SetHomomorphicHash(sh.GetHomomorphicHash()) - return hdr, nil -} - -func (f *headRequestForwarder) getHeaderAndSignature(hdrWithSig *objectV2.HeaderWithSignature) (*objectV2.Header, *refs.Signature, error) { - if f.Request.GetBody().GetMainOnly() { - return nil, nil, errWrongHeaderPartTypeExpWithSignRecvShort - } - - if hdrWithSig == nil { - return nil, nil, errNilObjectPart - } - - hdr := hdrWithSig.GetHeader() - idSig := hdrWithSig.GetSignature() - - if idSig == nil { - return nil, nil, errMissingSignature - } - - binID, err := f.ObjectAddr.Object().Marshal() - if err != nil { - return nil, nil, errMarshalID(err) - } - - var sig frostfscrypto.Signature - if err := sig.ReadFromV2(*idSig); err != nil { - return nil, nil, errCantReadSignature(err) - } - - if !sig.Verify(binID) { - return nil, nil, errInvalidObjectIDSign - } - - return hdr, idSig, nil -} - -func (f *headRequestForwarder) sendHeadRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*objectV2.HeadResponse, error) { - var headResp *objectV2.HeadResponse - err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { - var e error - headResp, e = rpc.HeadObject(cli, f.Request, rpcclient.WithContext(ctx)) - return e - }) - if err != nil { - return nil, errSendingRequestFailed(err) - } - return headResp, nil -} diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go deleted file mode 100644 index 0ec8912fd..000000000 --- a/pkg/services/object/get/v2/service.go +++ /dev/null @@ -1,150 +0,0 @@ -package getsvc - -import ( - "context" - "errors" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.uber.org/zap" -) - -// Service implements Get operation of Object service v2. -type Service struct { - *cfg -} - -// Option represents Service constructor option. -type Option func(*cfg) - -type clientSource interface { - Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error) -} - -type cfg struct { - svc *getsvc.Service - - keyStorage *objutil.KeyStorage - - clientSource clientSource - - netmapSource netmap.Source - - announcedKeys netmap.AnnouncedKeys - - contSource container.Source - - log *logger.Logger -} - -// NewService constructs Service instance from provided options. 
-func NewService(svc *getsvc.Service,
- keyStorage *objutil.KeyStorage,
- clientSource clientSource,
- netmapSource netmap.Source,
- announcedKeys netmap.AnnouncedKeys,
- contSource container.Source,
- opts ...Option,
-) *Service {
- c := &cfg{
- svc: svc,
- keyStorage: keyStorage,
- clientSource: clientSource,
- netmapSource: netmapSource,
- announcedKeys: announcedKeys,
- contSource: contSource,
- log: logger.NewLoggerWrapper(zap.L()),
- }
-
- for i := range opts {
- opts[i](c)
- }
-
- return &Service{
- cfg: c,
- }
-}
-
-// Get calls internal service and returns v2 object stream.
-func (s *Service) Get(req *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- p, err := s.toPrm(req, stream)
- if err != nil {
- return err
- }
-
- err = s.svc.Get(stream.Context(), *p)
-
- var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
-
- switch {
- case errors.As(err, &splitErr):
- return stream.Send(splitInfoResponse(splitErr.SplitInfo()))
- case errors.As(err, &ecErr):
- return stream.Send(ecInfoResponse(ecErr.ECInfo()))
- default:
- return err
- }
-}
-
-// GetRange calls internal service and returns v2 payload range stream.
-func (s *Service) GetRange(req *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- p, err := s.toRangePrm(req, stream)
- if err != nil {
- return err
- }
-
- err = s.svc.GetRange(stream.Context(), *p)
-
- var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
-
- switch {
- case errors.As(err, &splitErr):
- return stream.Send(splitInfoRangeResponse(splitErr.SplitInfo()))
- case errors.As(err, &ecErr):
- return stream.Send(ecInfoRangeResponse(ecErr.ECInfo()))
- default:
- return err
- }
-}
-
-// Head serves FrostFS API v2 compatible HEAD requests.
-func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- resp := new(objectV2.HeadResponse)
- resp.SetBody(new(objectV2.HeadResponseBody))
-
- p, err := s.toHeadPrm(req, resp)
- if err != nil {
- return nil, err
- }
-
- err = s.svc.Head(ctx, *p)
-
- var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
-
- if errors.As(err, &splitErr) {
- setSplitInfoHeadResponse(splitErr.SplitInfo(), resp)
- err = nil
- }
- if errors.As(err, &ecErr) {
- setECInfoHeadResponse(ecErr.ECInfo(), resp)
- err = nil
- }
-
- return resp, err
-}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
deleted file mode 100644
index 0d73bcd4d..000000000
--- a/pkg/services/object/get/v2/streamer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package getsvc
-
-import (
- "context"
-
- objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-type streamObjectWriter struct {
- objectSvc.GetObjectStream
-}
-
-type streamObjectRangeWriter struct {
- objectSvc.GetObjectRangeStream
-}
-
-func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
- p := new(objectV2.GetObjectPartInit)
-
- objV2 := obj.ToV2()
- p.SetObjectID(objV2.GetObjectID())
- p.SetHeader(objV2.GetHeader())
- p.SetSignature(objV2.GetSignature())
-
- return s.Send(newResponse(p))
-}
-
-func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
- p := new(objectV2.GetObjectPartChunk)
- p.SetChunk(chunk)
-
- return s.Send(newResponse(p))
-} - -func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { - r := new(objectV2.GetResponse) - - body := new(objectV2.GetResponseBody) - r.SetBody(body) - - body.SetObjectPart(p) - - return r -} - -func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.Send(newRangeResponse(chunk)) -} - -func newRangeResponse(p []byte) *objectV2.GetRangeResponse { - r := new(objectV2.GetRangeResponse) - - body := new(objectV2.GetRangeResponseBody) - r.SetBody(body) - - part := new(objectV2.GetRangePartChunk) - part.SetChunk(p) - - body.SetRangePart(part) - - return r -} diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go deleted file mode 100644 index e699a3779..000000000 --- a/pkg/services/object/get/v2/util.go +++ /dev/null @@ -1,429 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/sha256" - "errors" - "hash" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" - "git.frostfs.info/TrueCloudLab/tzhash/tz" -) - -func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStream) (*getsvc.Prm, error) { - body := req.GetBody() - - addrV2 := body.GetAddress() - if addrV2 == nil { - return nil, errMissingObjAddress - } - - var addr oid.Address - - err := addr.ReadFromV2(*addrV2) - if err != nil { - return nil, errInvalidObjAddress(err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - streamWrapper := &streamObjectWriter{stream} - - p := new(getsvc.Prm) - p.SetCommonParameters(commonPrm) - - p.WithAddress(addr) - p.WithRawFlag(body.GetRaw()) - p.SetObjectWriter(streamWrapper) - - if !commonPrm.LocalOnly() { - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - forwarder := &getRequestForwarder{ - GlobalProgress: 0, - Key: key, - Request: req, - Stream: streamWrapper, - } - - p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode)) - } - - return p, nil -} - -func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) (*getsvc.RangePrm, error) { - body := req.GetBody() - - addrV2 := body.GetAddress() - if addrV2 == nil { - return nil, errMissingObjAddress - } - - var addr oid.Address - - err := addr.ReadFromV2(*addrV2) - if err != nil { - return nil, errInvalidObjAddress(err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - p := new(getsvc.RangePrm) - p.SetCommonParameters(commonPrm) - - streamWrapper := 
&streamObjectRangeWriter{stream} - - p.WithAddress(addr) - p.WithRawFlag(body.GetRaw()) - p.SetChunkWriter(streamWrapper) - p.SetRange(objectSDK.NewRangeFromV2(body.GetRange())) - - err = p.Validate() - if err != nil { - return nil, errRequestParamsValidation(err) - } - - if !commonPrm.LocalOnly() { - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - forwarder := &getRangeRequestForwarder{ - GlobalProgress: 0, - Key: key, - Request: req, - Stream: streamWrapper, - } - - p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode)) - } - - return p, nil -} - -func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.RangeHashPrm, error) { - body := req.GetBody() - - addrV2 := body.GetAddress() - if addrV2 == nil { - return nil, errMissingObjAddress - } - - var addr oid.Address - - err := addr.ReadFromV2(*addrV2) - if err != nil { - return nil, errInvalidObjAddress(err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - p := new(getsvc.RangeHashPrm) - p.SetCommonParameters(commonPrm) - - p.WithAddress(addr) - - if tok := commonPrm.SessionToken(); tok != nil { - signerKey, err := s.keyStorage.GetKey(&util.SessionInfo{ - ID: tok.ID(), - Owner: tok.Issuer(), - }) - if err != nil && clientSDK.IsErrSessionNotFound(err) { - commonPrm.ForgetTokens() - signerKey, err = s.keyStorage.GetKey(nil) - } - - if err != nil { - return nil, errFetchingSessionKey(err) - } - - p.WithCachedSignerKey(signerKey) - } - - rngsV2 := body.GetRanges() - rngs := make([]objectSDK.Range, len(rngsV2)) - - for i := range rngsV2 { - rngs[i] = *objectSDK.NewRangeFromV2(&rngsV2[i]) - } - - p.SetRangeList(rngs) - p.SetSalt(body.GetSalt()) - - switch t := body.GetType(); t { - default: - return nil, errUnknownChechsumType(t) - case refs.SHA256: - p.SetHashGenerator(sha256.New) - case refs.TillichZemor: - p.SetHashGenerator(func() hash.Hash { - return tz.New() - }) - } - - return p, nil -} - -type headResponseWriter struct { - mainOnly bool - - body *objectV2.HeadResponseBody -} - -func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error { - if w.mainOnly { - w.body.SetHeaderPart(toShortObjectHeader(hdr)) - } else { - w.body.SetHeaderPart(toFullObjectHeader(hdr)) - } - - return nil -} - -func (s *Service) toHeadPrm(req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) { - body := req.GetBody() - - addrV2 := body.GetAddress() - if addrV2 == nil { - return nil, errMissingObjAddress - } - - var objAddr oid.Address - - err := objAddr.ReadFromV2(*addrV2) - if err != nil { - return nil, errInvalidObjAddress(err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - p := new(getsvc.HeadPrm) - p.SetCommonParameters(commonPrm) - - p.WithAddress(objAddr) - p.WithRawFlag(body.GetRaw()) - p.SetHeaderWriter(&headResponseWriter{ - mainOnly: body.GetMainOnly(), - body: resp.GetBody(), - }) - - if commonPrm.LocalOnly() { - return p, nil - } - - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - forwarder := &headRequestForwarder{ - Request: req, - ObjectAddr: objAddr, - Key: key, - } - - p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode)) - - return p, nil -} - -func splitInfoResponse(info *objectSDK.SplitInfo) *objectV2.GetResponse { - resp := new(objectV2.GetResponse) - - body := new(objectV2.GetResponseBody) - resp.SetBody(body) - - body.SetObjectPart(info.ToV2()) - - 
return resp -} - -func ecInfoResponse(info *objectSDK.ECInfo) *objectV2.GetResponse { - resp := new(objectV2.GetResponse) - - body := new(objectV2.GetResponseBody) - resp.SetBody(body) - - body.SetObjectPart(info.ToV2()) - - return resp -} - -func splitInfoRangeResponse(info *objectSDK.SplitInfo) *objectV2.GetRangeResponse { - resp := new(objectV2.GetRangeResponse) - - body := new(objectV2.GetRangeResponseBody) - resp.SetBody(body) - - body.SetRangePart(info.ToV2()) - - return resp -} - -func ecInfoRangeResponse(info *objectSDK.ECInfo) *objectV2.GetRangeResponse { - resp := new(objectV2.GetRangeResponse) - - body := new(objectV2.GetRangeResponseBody) - resp.SetBody(body) - - body.SetRangePart(info.ToV2()) - - return resp -} - -func setSplitInfoHeadResponse(info *objectSDK.SplitInfo, resp *objectV2.HeadResponse) { - resp.GetBody().SetHeaderPart(info.ToV2()) -} - -func setECInfoHeadResponse(info *objectSDK.ECInfo, resp *objectV2.HeadResponse) { - resp.GetBody().SetHeaderPart(info.ToV2()) -} - -func toHashResponse(typ refs.ChecksumType, res *getsvc.RangeHashRes) *objectV2.GetRangeHashResponse { - resp := new(objectV2.GetRangeHashResponse) - - body := new(objectV2.GetRangeHashResponseBody) - resp.SetBody(body) - - body.SetType(typ) - body.SetHashList(res.Hashes()) - - return resp -} - -func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart { - obj := hdr.ToV2() - - hs := new(objectV2.HeaderWithSignature) - hs.SetHeader(obj.GetHeader()) - hs.SetSignature(obj.GetSignature()) - - return hs -} - -func toShortObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart { - hdrV2 := hdr.ToV2().GetHeader() - - sh := new(objectV2.ShortHeader) - sh.SetOwnerID(hdrV2.GetOwnerID()) - sh.SetCreationEpoch(hdrV2.GetCreationEpoch()) - sh.SetPayloadLength(hdrV2.GetPayloadLength()) - sh.SetVersion(hdrV2.GetVersion()) - sh.SetObjectType(hdrV2.GetObjectType()) - sh.SetHomomorphicHash(hdrV2.GetHomomorphicHash()) - sh.SetPayloadHash(hdrV2.GetPayloadHash()) - - return sh -} - -func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) (*objectSDK.Object, error)) getsvc.RequestForwarder { - return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) (*objectSDK.Object, error) { - var ( - firstErr error - res *objectSDK.Object - - key = info.PublicKey() - ) - - info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { - var err error - res, err = f(ctx, addr, c, key) - - // non-status logic error that could be returned - // from the SDK client; should not be considered - // as a connection error - var siErr *objectSDK.SplitInfoError - var eiErr *objectSDK.ECInfoError - - stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr) - - if stop || firstErr == nil { - firstErr = err - } - - return - }) - - return res, firstErr - } -} - -func writeCurrentVersion(metaHdr *session.RequestMetaHeader) { - versionV2 := new(refs.Version) - - apiVersion := versionSDK.Current() - apiVersion.WriteToV2(versionV2) - - metaHdr.SetVersion(versionV2) -} - -func checkStatus(stV2 *status.Status) error { - if !status.IsSuccess(stV2.Code()) { - st := apistatus.FromStatusV2(stV2) - return apistatus.ErrFromStatus(st) - } - - return nil -} - -func chunkToSend(global, local int, chunk []byte) []byte { - if global == local { - return chunk - } - - if local+len(chunk) <= global { - // chunk has already been sent - return nil - } - - return chunk[global-local:] -} - -type apiResponse interface { - GetMetaHeader() 
*session.ResponseMetaHeader - GetVerificationHeader() *session.ResponseVerificationHeader -} - -func verifyResponse(resp apiResponse, pubkey []byte) error { - if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil { - return err - } - - if err := signature.VerifyServiceMessage(resp); err != nil { - return errResponseVerificationFailed(err) - } - - return checkStatus(resp.GetMetaHeader().GetStatus()) -} diff --git a/pkg/services/object/get/writer.go b/pkg/services/object/get/writer.go deleted file mode 100644 index 3aa4d66ac..000000000 --- a/pkg/services/object/get/writer.go +++ /dev/null @@ -1,96 +0,0 @@ -package getsvc - -import ( - "context" - "io" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// ChunkWriter is an interface of target component -// to write payload chunk. -type ChunkWriter interface { - WriteChunk(context.Context, []byte) error -} - -// HeaderWriter is an interface of target component -// to write object header. -type HeaderWriter interface { - WriteHeader(context.Context, *objectSDK.Object) error -} - -// ObjectWriter is an interface of target component to write object. -type ObjectWriter interface { - HeaderWriter - ChunkWriter -} - -type SimpleObjectWriter struct { - obj *objectSDK.Object - - pld []byte -} - -type partWriter struct { - ObjectWriter - - headWriter HeaderWriter - - chunkWriter ChunkWriter -} - -type hasherWrapper struct { - hash io.Writer -} - -func NewSimpleObjectWriter() *SimpleObjectWriter { - return &SimpleObjectWriter{ - obj: objectSDK.New(), - } -} - -func (s *SimpleObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Object) error { - s.obj = obj - s.pld = make([]byte, 0, obj.PayloadSize()) - return nil -} - -func (s *SimpleObjectWriter) WriteChunk(_ context.Context, p []byte) error { - s.pld = append(s.pld, p...) 
-	return nil
-}
-
-func (s *SimpleObjectWriter) Object() *objectSDK.Object {
-	if len(s.pld) > 0 {
-		s.obj.SetPayload(s.pld)
-	}
-
-	return s.obj
-}
-
-func (w *partWriter) WriteChunk(ctx context.Context, p []byte) error {
-	return w.chunkWriter.WriteChunk(ctx, p)
-}
-
-func (w *partWriter) WriteHeader(ctx context.Context, o *objectSDK.Object) error {
-	return w.headWriter.WriteHeader(ctx, o)
-}
-
-func (h *hasherWrapper) WriteChunk(_ context.Context, p []byte) error {
-	_, err := h.hash.Write(p)
-	return err
-}
-
-type payloadWriter struct {
-	origin ChunkWriter
-	obj    *objectSDK.Object
-}
-
-func (w *payloadWriter) WriteChunk(ctx context.Context, p []byte) error {
-	return w.origin.WriteChunk(ctx, p)
-}
-
-func (w *payloadWriter) WriteHeader(_ context.Context, o *objectSDK.Object) error {
-	w.obj = o
-	return nil
-}
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
deleted file mode 100644
index 3e8832640..000000000
--- a/pkg/services/object/internal/client/client.go
+++ /dev/null
@@ -1,551 +0,0 @@
-package internal
-
-import (
-	"bytes"
-	"context"
-	"crypto/ecdsa"
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-
-	coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-)
-
-type commonPrm struct {
-	cli coreclient.Client
-
-	key *ecdsa.PrivateKey
-
-	tokenSession *session.Object
-
-	tokenBearer *bearer.Token
-
-	local bool
-
-	xHeaders []string
-
-	netmapEpoch uint64
-}
-
-// SetClient sets base client for FrostFS API communication.
-//
-// Required parameter.
-func (x *commonPrm) SetClient(cli coreclient.Client) {
-	x.cli = cli
-}
-
-// SetPrivateKey sets private key to sign the request(s).
-//
-// Required parameter.
-func (x *commonPrm) SetPrivateKey(key *ecdsa.PrivateKey) {
-	x.key = key
-}
-
-// SetSessionToken sets token of the session within which request should be sent.
-//
-// By default the request will be sent outside the session.
-func (x *commonPrm) SetSessionToken(tok *session.Object) {
-	x.tokenSession = tok
-}
-
-// SetBearerToken sets bearer token to be attached to the request.
-//
-// By default token is not attached to the request.
-func (x *commonPrm) SetBearerToken(tok *bearer.Token) {
-	x.tokenBearer = tok
-}
-
-// SetTTL sets time-to-live call option.
-func (x *commonPrm) SetTTL(ttl uint32) {
-	x.local = ttl < 2
-}
-
-// SetXHeaders sets request X-Headers.
-//
-// By default X-Headers will not be attached to the request.
-func (x *commonPrm) SetXHeaders(hs []string) {
-	x.xHeaders = hs
-}
-
-func (x *commonPrm) calculateXHeaders() []string {
-	hs := x.xHeaders
-	if x.netmapEpoch != 0 {
-		hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
-	}
-	return hs
-}
-
-type readPrmCommon struct {
-	commonPrm
-}
-
-// SetNetmapEpoch sets the epoch number to be used to locate the object.
-//
-// By default the current epoch on the server will be used.
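-//
-// A minimal usage sketch (the epoch value is illustrative):
-//
-//	var prm GetObjectPrm
-//	prm.SetNetmapEpoch(42) // pin the lookup to epoch 42 instead of the current one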
-func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
-	x.netmapEpoch = epoch
-}
-
-// GetObjectPrm groups parameters of GetObject operation.
-type GetObjectPrm struct {
-	readPrmCommon
-
-	ClientParams client.PrmObjectGet
-
-	obj oid.ID
-}
-
-// SetRawFlag sets raw flag of the request.
-//
-// By default request will not be raw.
-func (x *GetObjectPrm) SetRawFlag() {
-	x.ClientParams.Raw = true
-}
-
-// SetAddress sets object address.
-//
-// Required parameter.
-func (x *GetObjectPrm) SetAddress(addr oid.Address) {
-	x.obj = addr.Object()
-	cnr := addr.Container()
-
-	x.ClientParams.ContainerID = &cnr
-	x.ClientParams.ObjectID = &x.obj
-}
-
-// GetObjectRes groups the resulting values of GetObject operation.
-type GetObjectRes struct {
-	obj *objectSDK.Object
-}
-
-// Object returns the requested object.
-func (x GetObjectRes) Object() *objectSDK.Object {
-	return x.obj
-}
-
-// GetObject reads the object by address.
-//
-// Client, context and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-// Returns:
-//   - error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
-//   - error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
-//
-// GetObject ignores the provided session if it is not related to the requested object.
-func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
-	// here we ignore session if it is opened for other object since such
-	// request will almost definitely fail. The case can occur, for example,
-	// when session is bound to the parent object and child object is requested.
-	if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-		prm.ClientParams.Session = prm.tokenSession
-	}
-
-	prm.ClientParams.XHeaders = prm.calculateXHeaders()
-	prm.ClientParams.BearerToken = prm.tokenBearer
-	prm.ClientParams.Local = prm.local
-	prm.ClientParams.Key = prm.key
-
-	rdr, err := prm.cli.ObjectGetInit(ctx, prm.ClientParams)
-	if err != nil {
-		return nil, fmt.Errorf("init object reading: %w", err)
-	}
-
-	var obj objectSDK.Object
-
-	if !rdr.ReadHeader(&obj) {
-		res, err := rdr.Close()
-		if err == nil {
-			// pull out an error from status
-			err = apistatus.ErrFromStatus(res.Status())
-		} else {
-			ReportError(prm.cli, err)
-		}
-
-		return nil, fmt.Errorf("read object header: %w", err)
-	}
-
-	buf := make([]byte, obj.PayloadSize())
-
-	_, err = rdr.Read(buf)
-	if err != nil && !errors.Is(err, io.EOF) {
-		return nil, fmt.Errorf("read payload: %w", err)
-	}
-
-	obj.SetPayload(buf)
-
-	return &GetObjectRes{
-		obj: &obj,
-	}, nil
-}
-
-// HeadObjectPrm groups parameters of HeadObject operation.
-type HeadObjectPrm struct {
-	readPrmCommon
-
-	ClientParams client.PrmObjectHead
-
-	obj oid.ID
-}
-
-// SetRawFlag sets raw flag of the request.
-//
-// By default request will not be raw.
-func (x *HeadObjectPrm) SetRawFlag() {
-	x.ClientParams.Raw = true
-}
-
-// SetAddress sets object address.
-//
-// Required parameter.
-func (x *HeadObjectPrm) SetAddress(addr oid.Address) {
-	x.obj = addr.Object()
-	cnr := addr.Container()
-
-	x.ClientParams.ContainerID = &cnr
-	x.ClientParams.ObjectID = &x.obj
-}
-
-// HeadObjectRes groups the resulting values of HeadObject operation.
-type HeadObjectRes struct {
-	hdr *objectSDK.Object
-}
-
-// Header returns requested object header.
-func (x HeadObjectRes) Header() *objectSDK.Object {
-	return x.hdr
-}
-
-// HeadObject reads object header by address.
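-// Unlike GetObject, only the object header is transferred; the payload is not fetched.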
-//
-// Client and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-// Returns:
-//
-//	error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
-//	error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
-//
-// HeadObject ignores the provided session if it is not related to the requested object.
-func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
-	// see details in same statement of GetObject
-	if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-		prm.ClientParams.Session = prm.tokenSession
-	}
-
-	prm.ClientParams.BearerToken = prm.tokenBearer
-	prm.ClientParams.Local = prm.local
-	prm.ClientParams.XHeaders = prm.calculateXHeaders()
-
-	cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
-	if err == nil {
-		// pull out an error from status
-		err = apistatus.ErrFromStatus(cliRes.Status())
-	}
-
-	if err != nil {
-		return nil, fmt.Errorf("read object header from FrostFS: %w", err)
-	}
-
-	var hdr objectSDK.Object
-
-	if !cliRes.ReadHeader(&hdr) {
-		return nil, errors.New("missing object header in the response")
-	}
-
-	return &HeadObjectRes{
-		hdr: &hdr,
-	}, nil
-}
-
-// PayloadRangePrm groups parameters of PayloadRange operation.
-type PayloadRangePrm struct {
-	readPrmCommon
-
-	ln uint64
-
-	ClientParams client.PrmObjectRange
-
-	obj oid.ID
-}
-
-// SetRawFlag sets raw flag of the request.
-//
-// By default request will not be raw.
-func (x *PayloadRangePrm) SetRawFlag() {
-	x.ClientParams.Raw = true
-}
-
-// SetAddress sets object address.
-//
-// Required parameter.
-func (x *PayloadRangePrm) SetAddress(addr oid.Address) {
-	x.obj = addr.Object()
-	cnr := addr.Container()
-
-	x.ClientParams.ContainerID = &cnr
-	x.ClientParams.ObjectID = &x.obj
-}
-
-// SetRange sets the range of the object payload to be read.
-//
-// Required parameter.
-func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
-	x.ClientParams.Offset = rng.GetOffset()
-	x.ln = rng.GetLength()
-}
-
-// PayloadRangeRes groups the resulting values of PayloadRange operation.
-type PayloadRangeRes struct {
-	data []byte
-}
-
-// PayloadRange returns data of the requested payload range.
-func (x PayloadRangeRes) PayloadRange() []byte {
-	return x.data
-}
-
-// maxInitialBufferSize is the maximum initial buffer size for PayloadRange result.
-// We don't want to allocate a lot of space in advance because a query can
-// fail with apistatus.ObjectOutOfRange status.
-const maxInitialBufferSize = 1024 * 1024 // 1 MiB
-
-// PayloadRange reads object payload range by address.
-//
-// Client and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-// Returns:
-//
-//	error of type *objectSDK.SplitInfoError if object raw flag is set and requested object is virtual;
-//	error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed;
-//	error of type *apistatus.ObjectOutOfRange if the requested range is too big.
-//
-// PayloadRange ignores the provided session if it is not related to the requested object.
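-//
-// A minimal calling sketch (offset and length are illustrative; the client and
-// key are assumed to be set via SetClient and SetPrivateKey):
-//
-//	var rng objectSDK.Range
-//	rng.SetOffset(0)
-//	rng.SetLength(1024)
-//
-//	var prm PayloadRangePrm
-//	prm.SetAddress(addr)
-//	prm.SetRange(&rng)
-//
-//	res, err := PayloadRange(ctx, prm)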
-func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
-	// see details in same statement of GetObject
-	if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-		prm.ClientParams.Session = prm.tokenSession
-	}
-
-	prm.ClientParams.XHeaders = prm.calculateXHeaders()
-	prm.ClientParams.BearerToken = prm.tokenBearer
-	prm.ClientParams.Local = prm.local
-	prm.ClientParams.Length = prm.ln
-
-	rdr, err := prm.cli.ObjectRangeInit(ctx, prm.ClientParams)
-	if err != nil {
-		return nil, fmt.Errorf("init payload reading: %w", err)
-	}
-
-	if int64(prm.ln) < 0 {
-		// `CopyN` expects `int64`, this check ensures that the result is positive.
-		// In practice this means that we can return incorrect results for objects
-		// with size > 8_388 Petabytes, this will be fixed later with support for streaming.
-		return nil, new(apistatus.ObjectOutOfRange)
-	}
-
-	ln := min(prm.ln, maxInitialBufferSize)
-
-	w := bytes.NewBuffer(make([]byte, 0, ln))
-	_, err = io.CopyN(w, rdr, int64(prm.ln))
-	if err != nil {
-		return nil, fmt.Errorf("read payload: %w", err)
-	}
-
-	return &PayloadRangeRes{
-		data: w.Bytes(),
-	}, nil
-}
-
-// PutObjectPrm groups parameters of PutObject operation.
-type PutObjectPrm struct {
-	commonPrm
-
-	obj *objectSDK.Object
-}
-
-// SetObject sets object to be stored.
-//
-// Required parameter.
-func (x *PutObjectPrm) SetObject(obj *objectSDK.Object) {
-	x.obj = obj
-}
-
-// PutObjectRes groups the resulting values of PutObject operation.
-type PutObjectRes struct {
-	id oid.ID
-}
-
-// ID returns identifier of the stored object.
-func (x PutObjectRes) ID() oid.ID {
-	return x.id
-}
-
-// PutObject saves the object in local storage of the remote node.
-//
-// Client and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObject")
-	defer span.End()
-
-	prmCli := client.PrmObjectPutInit{
-		XHeaders:    prm.calculateXHeaders(),
-		BearerToken: prm.tokenBearer,
-		Session:     prm.tokenSession,
-		Local:       true,
-		Key:         prm.key,
-	}
-
-	w, err := prm.cli.ObjectPutInit(ctx, prmCli)
-	if err != nil {
-		return nil, fmt.Errorf("init object writing on client: %w", err)
-	}
-
-	if w.WriteHeader(ctx, *prm.obj) {
-		w.WritePayloadChunk(ctx, prm.obj.Payload())
-	}
-
-	cliRes, err := w.Close(ctx)
-	if err == nil {
-		err = apistatus.ErrFromStatus(cliRes.Status())
-	} else {
-		ReportError(prm.cli, err)
-	}
-
-	if err != nil {
-		return nil, fmt.Errorf("write object via client: %w", err)
-	}
-
-	return &PutObjectRes{
-		id: cliRes.StoredObjectID(),
-	}, nil
-}
-
-// PutObjectSingle saves the object in local storage of the remote node with PutSingle RPC.
-//
-// Client and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
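-//
-// Unlike PutObject, the object is sent in a single request instead of being
-// streamed, so it must already be fully formed and have its ID set.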
-func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObjectSingle") - defer span.End() - - objID, isSet := prm.obj.ID() - if !isSet { - return nil, errors.New("missing object id") - } - - prmCli := client.PrmObjectPutSingle{ - XHeaders: prm.calculateXHeaders(), - BearerToken: prm.tokenBearer, - Session: prm.tokenSession, - Local: true, - Key: prm.key, - Object: prm.obj, - } - - res, err := prm.cli.ObjectPutSingle(ctx, prmCli) - if err != nil { - ReportError(prm.cli, err) - return nil, fmt.Errorf("put single object on client: %w", err) - } - - if err = apistatus.ErrFromStatus(res.Status()); err != nil { - return nil, fmt.Errorf("put single object via client: %w", err) - } - - return &PutObjectRes{ - id: objID, - }, nil -} - -// SearchObjectsPrm groups parameters of SearchObjects operation. -type SearchObjectsPrm struct { - readPrmCommon - - cliPrm client.PrmObjectSearch -} - -// SetContainerID sets identifier of the container to search the objects. -// -// Required parameter. -func (x *SearchObjectsPrm) SetContainerID(id cid.ID) { - x.cliPrm.ContainerID = &id -} - -// SetFilters sets search filters. -func (x *SearchObjectsPrm) SetFilters(fs objectSDK.SearchFilters) { - x.cliPrm.Filters = fs -} - -// SearchObjectsRes groups the resulting values of SearchObjects operation. -type SearchObjectsRes struct { - ids []oid.ID -} - -// IDList returns identifiers of the matched objects. -func (x SearchObjectsRes) IDList() []oid.ID { - return x.ids -} - -// SearchObjects selects objects from container which match the filters. -// -// Returns any error which prevented the operation from completing correctly in error return. -func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) { - prm.cliPrm.Local = prm.local - prm.cliPrm.Session = prm.tokenSession - prm.cliPrm.BearerToken = prm.tokenBearer - prm.cliPrm.XHeaders = prm.calculateXHeaders() - prm.cliPrm.Key = prm.key - - rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm) - if err != nil { - return nil, fmt.Errorf("init object searching in client: %w", err) - } - - buf := make([]oid.ID, 10) - var ids []oid.ID - var n int - var ok bool - - for { - n, ok = rdr.Read(buf) - if n > 0 { - for i := range buf[:n] { - v := buf[i] - ids = append(ids, v) - } - } - - if !ok { - break - } - } - - res, err := rdr.Close() - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(res.Status()) - } - - if err != nil { - return nil, fmt.Errorf("read object list: %w", err) - } - - return &SearchObjectsRes{ - ids: ids, - }, nil -} diff --git a/pkg/services/object/internal/client/doc.go b/pkg/services/object/internal/client/doc.go deleted file mode 100644 index 8d7203573..000000000 --- a/pkg/services/object/internal/client/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package internal provides functionality for FrostFS Node Object service communication with FrostFS network. -// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client. -// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism), -// the Object service does not fully use the client's flexible interface. -// -// In this regard, this package provides functions over base API client necessary for the application. 
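-//
-// For example, GetObject, HeadObject, PayloadRange, PutObject and SearchObjects
-// in this package wrap the corresponding SDK calls and uniformly convert status
-// responses into regular Go errors.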
-// Concentrating the entire spectrum of the client's use in one place is convenient
-// both when updating the base client and when evaluating the UX of the SDK library. It is therefore expected
-// that all Object service packages limit themselves to this package when developing functionality that requires
-// FrostFS API communication.
-package internal
diff --git a/pkg/services/object/internal/client/error.go b/pkg/services/object/internal/client/error.go
deleted file mode 100644
index 98cdfcf95..000000000
--- a/pkg/services/object/internal/client/error.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package internal
-
-import clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-
-type errorReporter interface {
-	ReportError(error)
-}
-
-// ReportError drops client connection if possible.
-func ReportError(c clientcore.Client, err error) {
-	if ce, ok := c.(errorReporter); ok {
-		ce.ReportError(err)
-	}
-}
diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go
deleted file mode 100644
index 1e0a7ef90..000000000
--- a/pkg/services/object/internal/key.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package internal
-
-import (
-	"bytes"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-)
-
-// VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not.
-func VerifyResponseKeyV2(expectedKey []byte, resp interface {
-	GetVerificationHeader() *session.ResponseVerificationHeader
-},
-) error {
-	if !bytes.Equal(resp.GetVerificationHeader().GetBodySignature().GetKey(), expectedKey) {
-		return client.ErrWrongPublicKey
-	}
-
-	return nil
-}
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
deleted file mode 100644
index 6a6ee0f0f..000000000
--- a/pkg/services/object/metrics.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package object
-
-import (
-	"context"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
-)
-
-type (
-	MetricCollector struct {
-		next    ServiceServer
-		metrics MetricRegister
-		enabled bool
-	}
-
-	getStreamMetric struct {
-		util.ServerStream
-		stream  GetObjectStream
-		metrics MetricRegister
-	}
-
-	putStreamMetric struct {
-		stream  PutObjectStream
-		metrics MetricRegister
-		start   time.Time
-	}
-
-	patchStreamMetric struct {
-		stream  PatchObjectStream
-		metrics MetricRegister
-		start   time.Time
-	}
-
-	MetricRegister interface {
-		AddRequestDuration(string, time.Duration, bool, string)
-		AddPayloadSize(string, int)
-	}
-)
-
-func NewMetricCollector(next ServiceServer, register MetricRegister, enabled bool) *MetricCollector {
-	return &MetricCollector{
-		next:    next,
-		metrics: register,
-		enabled: enabled,
-	}
-}
-
-func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (err error) {
-	if m.enabled {
-		t := time.Now()
-		defer func() {
-			m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
-		}()
-		err = m.next.Get(req, &getStreamMetric{
-			ServerStream: stream,
-			stream:       stream,
-			metrics:      m.metrics,
-		})
-	} else {
-		err = m.next.Get(req, stream)
-	}
-	return
-}
-
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
-	if m.enabled {
-		t := time.Now()
-
-		stream, err := m.next.Put(ctx)
-		if err != nil {
-			return nil, err
-		}
-
-		return &putStreamMetric{
stream: stream, - metrics: m.metrics, - start: t, - }, nil - } - return m.next.Put(ctx) -} - -func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) { - if m.enabled { - t := time.Now() - - stream, err := m.next.Patch(ctx) - if err != nil { - return nil, err - } - - return &patchStreamMetric{ - stream: stream, - metrics: m.metrics, - start: t, - }, nil - } - return m.next.Patch(ctx) -} - -func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) { - if m.enabled { - t := time.Now() - - res, err := m.next.PutSingle(ctx, request) - - m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) - if err == nil { - m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload())) - } - - return res, err - } - return m.next.PutSingle(ctx, request) -} - -func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) { - if m.enabled { - t := time.Now() - - res, err := m.next.Head(ctx, request) - - m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) - - return res, err - } - return m.next.Head(ctx, request) -} - -func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream) error { - if m.enabled { - t := time.Now() - - err := m.next.Search(req, stream) - - m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) - - return err - } - return m.next.Search(req, stream) -} - -func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteRequest) (*object.DeleteResponse, error) { - if m.enabled { - t := time.Now() - - res, err := m.next.Delete(ctx, request) - - m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) - return res, err - } - return m.next.Delete(ctx, request) -} - -func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error { - if m.enabled { - t := time.Now() - - err := m.next.GetRange(req, stream) - - m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) - - return err - } - return m.next.GetRange(req, stream) -} - -func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - if m.enabled { - t := time.Now() - - res, err := m.next.GetRangeHash(ctx, request) - - m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) - - return res, err - } - return m.next.GetRangeHash(ctx, request) -} - -func (m *MetricCollector) Enable() { - m.enabled = true -} - -func (m *MetricCollector) Disable() { - m.enabled = false -} - -func (s getStreamMetric) Send(resp *object.GetResponse) error { - chunk, ok := resp.GetBody().GetObjectPart().(*object.GetObjectPartChunk) - if ok { - s.metrics.AddPayloadSize("Get", len(chunk.GetChunk())) - } - - return s.stream.Send(resp) -} - -func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error { - chunk, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartChunk) - if ok { - s.metrics.AddPayloadSize("Put", len(chunk.GetChunk())) - } - - return s.stream.Send(ctx, req) -} - -func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { - res, err := s.stream.CloseAndRecv(ctx) - - s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, 
qos.IOTagFromContext(ctx))
-
-	return res, err
-}
-
-func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
-	s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk()))
-
-	return s.stream.Send(ctx, req)
-}
-
-func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
-	res, err := s.stream.CloseAndRecv(ctx)
-
-	s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
-
-	return res, err
-}
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
deleted file mode 100644
index cb3f7c342..000000000
--- a/pkg/services/object/patch/range_provider.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package patchsvc
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"io"
-
-	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
-	objectUtil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	patcherSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
-)
-
-func (p *pipeChunkWriter) WriteChunk(_ context.Context, chunk []byte) error {
-	_, err := p.wr.Write(chunk)
-	return err
-}
-
-type rangeProvider struct {
-	getSvc *getsvc.Service
-
-	addr oid.Address
-
-	commonPrm *objectUtil.CommonPrm
-
-	localNodeKey *ecdsa.PrivateKey
-}
-
-var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
-
-func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
-	// A remote GetRange request to a container node uses an SDK client that fails range
-	// validation for zero length. However, from the patcher's point of view, such a request is still valid.
-	if rng.GetLength() == 0 {
-		return &nopReader{}
-	}
-
-	pipeReader, pipeWriter := io.Pipe()
-
-	var rngPrm getsvc.RangePrm
-	rngPrm.SetSignerKey(r.localNodeKey)
-	rngPrm.SetCommonParameters(r.commonPrm)
-
-	rngPrm.WithAddress(r.addr)
-	rngPrm.SetChunkWriter(&pipeChunkWriter{
-		wr: pipeWriter,
-	})
-	rngPrm.SetRange(rng)
-
-	getRangeErr := make(chan error)
-
-	go func() {
-		defer pipeWriter.Close()
-
-		select {
-		case <-ctx.Done():
-			pipeWriter.CloseWithError(ctx.Err())
-		case err := <-getRangeErr:
-			pipeWriter.CloseWithError(err)
-		}
-	}()
-
-	go func() {
-		getRangeErr <- r.getSvc.GetRange(ctx, rngPrm)
-	}()
-
-	return pipeReader
-}
-
-type nopReader struct{}
-
-func (nopReader) Read(_ []byte) (int, error) {
-	return 0, io.EOF
-}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
deleted file mode 100644
index 5d298bfed..000000000
--- a/pkg/services/object/patch/service.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package patchsvc
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
-	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
-	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
-)
-
-// Service implements Patch operation of Object service v2.
-type Service struct {
-	*objectwriter.Config
-
-	getSvc *getsvc.Service
-}
-
-// NewService constructs Service instance from provided options.
-//
-// Patch service can use the same objectwriter.Config initialized by Put service.
-func NewService(cfg *objectwriter.Config,
-	getSvc *getsvc.Service,
-) *Service {
-	return &Service{
-		Config: cfg,
-
-		getSvc: getSvc,
-	}
-}
-
-// Patch calls internal service and returns v2 object streamer.
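-//
-// A hypothetical call sequence (request construction is elided):
-//
-//	streamer, err := svc.Patch()
-//	// feed one or more object.PatchRequest messages:
-//	err = streamer.Send(ctx, req)
-//	// finalize and receive the patched object ID:
-//	resp, err := streamer.CloseAndRecv(ctx)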
-func (s *Service) Patch() (object.PatchObjectStream, error) { - nodeKey, err := s.KeyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - return &Streamer{ - Config: s.Config, - getSvc: s.getSvc, - localNodeKey: nodeKey, - }, nil -} diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go deleted file mode 100644 index ff13b1d3e..000000000 --- a/pkg/services/object/patch/streamer.go +++ /dev/null @@ -1,243 +0,0 @@ -package patchsvc - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher" -) - -// Streamer for the patch handler is a pipeline that merges two incoming streams of patches -// and original object payload chunks. The merged result is fed to Put stream target. -type Streamer struct { - *objectwriter.Config - - // Patcher must be initialized at first Streamer.Send call. - patcher patcher.PatchApplier - - nonFirstSend bool - - getSvc *getsvc.Service - - localNodeKey *ecdsa.PrivateKey -} - -type pipeChunkWriter struct { - wr *io.PipeWriter -} - -type headResponseWriter struct { - body *objectV2.HeadResponseBody -} - -func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error { - w.body.SetHeaderPart(toFullObjectHeader(hdr)) - return nil -} - -func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart { - obj := hdr.ToV2() - - hs := new(objectV2.HeaderWithSignature) - hs.SetHeader(obj.GetHeader()) - hs.SetSignature(obj.GetSignature()) - - return hs -} - -func isLinkObject(hdr *objectV2.HeaderWithSignature) bool { - split := hdr.GetHeader().GetSplit() - return len(split.GetChildren()) > 0 && split.GetParent() != nil -} - -func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool { - return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil -} - -func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { - hdrWithSig, addr, err := s.readHeader(ctx, req) - if err != nil { - return err - } - - if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular { - return errors.New("non-regular object can't be patched") - } - if isLinkObject(hdrWithSig) { - return errors.New("linking object can't be patched") - } - if isComplexObjectPart(hdrWithSig) { - return errors.New("complex object parts can't be patched") - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return err - } - commonPrm.WithLocalOnly(false) - - rangeProvider := &rangeProvider{ - getSvc: s.getSvc, - - addr: addr, - - commonPrm: commonPrm, - - localNodeKey: s.localNodeKey, - } - - hdr := hdrWithSig.GetHeader() - oV2 := new(objectV2.Object) - hV2 := new(objectV2.Header) - oV2.SetHeader(hV2) - oV2.GetHeader().SetContainerID(hdr.GetContainerID()) - oV2.GetHeader().SetPayloadLength(hdr.GetPayloadLength()) - oV2.GetHeader().SetAttributes(hdr.GetAttributes()) - - 
ownerID, err := newOwnerID(req.GetVerificationHeader()) - if err != nil { - return err - } - oV2.GetHeader().SetOwnerID(ownerID) - - target, err := target.New(ctx, objectwriter.Params{ - Config: s.Config, - Common: commonPrm, - Header: objectSDK.NewFromV2(oV2), - }) - if err != nil { - return fmt.Errorf("target creation: %w", err) - } - - patcherPrm := patcher.Params{ - Header: objectSDK.NewFromV2(oV2), - - RangeProvider: rangeProvider, - - ObjectWriter: target, - } - - s.patcher = patcher.New(patcherPrm) - return nil -} - -func (s *Streamer) readHeader(ctx context.Context, req *objectV2.PatchRequest) (hdrWithSig *objectV2.HeaderWithSignature, addr oid.Address, err error) { - addrV2 := req.GetBody().GetAddress() - if addrV2 == nil { - err = errors.New("patch request has nil-address") - return - } - - if err = addr.ReadFromV2(*addrV2); err != nil { - err = fmt.Errorf("read address error: %w", err) - return - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return - } - commonPrm.WithLocalOnly(false) - - var p getsvc.HeadPrm - p.SetSignerKey(s.localNodeKey) - p.SetCommonParameters(commonPrm) - - resp := new(objectV2.HeadResponse) - resp.SetBody(new(objectV2.HeadResponseBody)) - - p.WithAddress(addr) - p.SetHeaderWriter(&headResponseWriter{ - body: resp.GetBody(), - }) - - err = s.getSvc.Head(ctx, p) - if err != nil { - err = fmt.Errorf("get header error: %w", err) - return - } - - var ok bool - hdrPart := resp.GetBody().GetHeaderPart() - if hdrWithSig, ok = hdrPart.(*objectV2.HeaderWithSignature); !ok { - err = fmt.Errorf("unexpected header type: %T", hdrPart) - } - return -} - -func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { - ctx, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send") - defer span.End() - - defer func() { - s.nonFirstSend = true - }() - - if !s.nonFirstSend { - if err := s.init(ctx, req); err != nil { - return fmt.Errorf("streamer init error: %w", err) - } - } - - patch := new(objectSDK.Patch) - patch.FromV2(req.GetBody()) - - if !s.nonFirstSend { - err := s.patcher.ApplyHeaderPatch(ctx, - patcher.ApplyHeaderPatchPrm{ - NewSplitHeader: patch.NewSplitHeader, - NewAttributes: patch.NewAttributes, - ReplaceAttributes: patch.ReplaceAttributes, - }) - if err != nil { - return fmt.Errorf("patch attributes: %w", err) - } - } - - if patch.PayloadPatch != nil { - err := s.patcher.ApplyPayloadPatch(ctx, patch.PayloadPatch) - if err != nil { - return fmt.Errorf("patch payload: %w", err) - } - } else if s.nonFirstSend { - return errors.New("invalid non-first patch: empty payload") - } - - return nil -} - -func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { - if s.patcher == nil { - return nil, errors.New("uninitialized patch streamer") - } - patcherResp, err := s.patcher.Close(ctx) - if err != nil { - return nil, err - } - - oidV2 := new(refsV2.ObjectID) - - if patcherResp.AccessIdentifiers.ParentID != nil { - patcherResp.AccessIdentifiers.ParentID.WriteToV2(oidV2) - } else { - patcherResp.AccessIdentifiers.SelfID.WriteToV2(oidV2) - } - - return &objectV2.PatchResponse{ - Body: &objectV2.PatchResponseBody{ - ObjectID: oidV2, - }, - }, nil -} diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go deleted file mode 100644 index b9416789c..000000000 --- a/pkg/services/object/patch/util.go +++ /dev/null @@ -1,34 +0,0 @@ -package patchsvc - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" 
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) { - for vh.GetOrigin() != nil { - vh = vh.GetOrigin() - } - sig := vh.GetBodySignature() - if sig == nil { - return nil, errors.New("empty body signature") - } - key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("invalid signature key: %w", err) - } - - var userID user.ID - user.IDFromKey(&userID, (ecdsa.PublicKey)(*key)) - ownID := new(refs.OwnerID) - userID.WriteToV2(ownID) - - return ownID, nil -} diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go deleted file mode 100644 index 52a7c102c..000000000 --- a/pkg/services/object/put/prm.go +++ /dev/null @@ -1,67 +0,0 @@ -package putsvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -type PutInitPrm struct { - common *util.CommonPrm - - hdr *objectSDK.Object - - cnr containerSDK.Container - - traverseOpts []placement.Option - - relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error -} - -type PutChunkPrm struct { - chunk []byte -} - -func (p *PutInitPrm) WithCommonPrm(v *util.CommonPrm) *PutInitPrm { - if p != nil { - p.common = v - } - - return p -} - -func (p *PutInitPrm) WithObject(v *objectSDK.Object) *PutInitPrm { - if p != nil { - p.hdr = v - } - - return p -} - -func (p *PutInitPrm) WithCopyNumbers(v []uint32) *PutInitPrm { - if p != nil && len(v) > 0 { - p.traverseOpts = append(p.traverseOpts, placement.WithCopyNumbers(v)) - } - - return p -} - -func (p *PutInitPrm) WithRelay(f func(context.Context, client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm { - if p != nil { - p.relay = f - } - - return p -} - -func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm { - if p != nil { - p.chunk = v - } - - return p -} diff --git a/pkg/services/object/put/res.go b/pkg/services/object/put/res.go deleted file mode 100644 index c77e535fd..000000000 --- a/pkg/services/object/put/res.go +++ /dev/null @@ -1,13 +0,0 @@ -package putsvc - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type PutResponse struct { - id oid.ID -} - -func (r *PutResponse) ObjectID() oid.ID { - return r.id -} diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go deleted file mode 100644 index 7aeb5857d..000000000 --- a/pkg/services/object/put/service.go +++ /dev/null @@ -1,63 +0,0 @@ -package putsvc - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -type Service struct { - *objectwriter.Config -} - -func NewService(ks *objutil.KeyStorage, - cc objectwriter.ClientConstructor, - ms objectwriter.MaxSizeSource, - 
os objectwriter.ObjectStorage, - cs container.Source, - ns netmap.Source, - nk netmap.AnnouncedKeys, - nst netmap.State, - ir objectwriter.InnerRing, - opts ...objectwriter.Option, -) *Service { - c := &objectwriter.Config{ - Logger: logger.NewLoggerWrapper(zap.L()), - KeyStorage: ks, - ClientConstructor: cc, - MaxSizeSrc: ms, - LocalStore: os, - ContainerSource: cs, - NetmapSource: ns, - NetmapKeys: nk, - NetworkState: nst, - } - - for i := range opts { - opts[i](c) - } - - c.FormatValidator = object.NewFormatValidator( - object.WithLockSource(os), - object.WithNetState(nst), - object.WithInnerRing(ir), - object.WithNetmapSource(ns), - object.WithContainersSource(cs), - object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer), - object.WithLogger(c.Logger), - ) - - return &Service{ - Config: c, - } -} - -func (s *Service) Put() (*Streamer, error) { - return &Streamer{ - Config: s.Config, - }, nil -} diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go deleted file mode 100644 index 90f473254..000000000 --- a/pkg/services/object/put/single.go +++ /dev/null @@ -1,363 +0,0 @@ -package putsvc - -import ( - "bytes" - "context" - "crypto/sha256" - "errors" - "fmt" - "hash" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" - svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/tzhash/tz" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var errInvalidPayloadChecksum = errors.New("incorrect payload checksum") - -type putSingleRequestSigner struct { - req *objectAPI.PutSingleRequest - keyStorage *svcutil.KeyStorage - signer *sync.Once -} - -func (s *putSingleRequestSigner) GetRequestWithSignedHeader() (*objectAPI.PutSingleRequest, error) { - var resErr error - s.signer.Do(func() { - metaHdr := new(sessionV2.RequestMetaHeader) - meta := s.req.GetMetaHeader() - - metaHdr.SetTTL(meta.GetTTL() - 1) - metaHdr.SetOrigin(meta) - s.req.SetMetaHeader(metaHdr) - - privateKey, err := s.keyStorage.GetKey(nil) - if err != nil { - resErr = err - return - } - resErr = 
signature.SignServiceMessage(privateKey, s.req)
-	})
-	return s.req, resErr
-}
-
-func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest) (*objectAPI.PutSingleResponse, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "putsvc.PutSingle")
-	defer span.End()
-
-	obj := objectSDK.NewFromV2(req.GetBody().GetObject())
-
-	meta, err := s.validatePutSingle(ctx, obj)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := s.saveToNodes(ctx, obj, req, meta); err != nil {
-		return nil, err
-	}
-
-	resp := &objectAPI.PutSingleResponse{}
-	resp.SetBody(&objectAPI.PutSingleResponseBody{})
-	return resp, nil
-}
-
-func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
-	if err := s.validatePutSingleSize(ctx, obj); err != nil {
-		return object.ContentMeta{}, err
-	}
-
-	if err := s.validatePutSingleChecksum(obj); err != nil {
-		return object.ContentMeta{}, err
-	}
-
-	return s.validatePutSingleObject(ctx, obj)
-}
-
-func (s *Service) validatePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
-	if uint64(len(obj.Payload())) != obj.PayloadSize() {
-		return target.ErrWrongPayloadSize
-	}
-
-	maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
-	if obj.PayloadSize() > maxAllowedSize {
-		return target.ErrExceedingMaxSize
-	}
-
-	return nil
-}
-
-func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error {
-	cs, csSet := obj.PayloadChecksum()
-	if !csSet {
-		return errors.New("missing payload checksum")
-	}
-
-	var hash hash.Hash
-
-	switch typ := cs.Type(); typ {
-	default:
-		return fmt.Errorf("unsupported payload checksum type %v", typ)
-	case checksum.SHA256:
-		hash = sha256.New()
-	case checksum.TZ:
-		hash = tz.New()
-	}
-
-	if _, err := hash.Write(obj.Payload()); err != nil {
-		return fmt.Errorf("could not compute payload hash: %w", err)
-	}
-
-	if !bytes.Equal(hash.Sum(nil), cs.Value()) {
-		return errInvalidPayloadChecksum
-	}
-
-	return nil
-}
-
-func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
-	if err := s.FormatValidator.Validate(ctx, obj, false); err != nil {
-		return object.ContentMeta{}, fmt.Errorf("could not validate object format: %w", err)
-	}
-
-	meta, err := s.FormatValidator.ValidateContent(obj)
-	if err != nil {
-		return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err)
-	}
-
-	return meta, nil
-}
-
-func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
-	localOnly := req.GetMetaHeader().GetTTL() <= 1
-	placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
-	if err != nil {
-		return err
-	}
-
-	if placement.isEC {
-		return s.saveToECReplicas(ctx, placement, obj, req, meta)
-	}
-
-	return s.saveToREPReplicas(ctx, placement, obj, localOnly, req, meta)
-}
-
-func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
-	iter := s.NewNodeIterator(placement.placementOptions)
-	iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
-	iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
-
-	signer := &putSingleRequestSigner{
-		req:        req,
-		keyStorage: s.KeyStorage,
-		signer:     &sync.Once{},
-	}
-
-	return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor)
error { - return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container) - }) -} - -func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - commonPrm, err := svcutil.CommonPrmFromV2(req) - if err != nil { - return err - } - key, err := s.KeyStorage.GetKey(nil) - if err != nil { - return err - } - signer := &putSingleRequestSigner{ - req: req, - keyStorage: s.KeyStorage, - signer: &sync.Once{}, - } - - w := objectwriter.ECWriter{ - Config: s.Config, - PlacementOpts: placement.placementOptions, - ObjectMeta: meta, - ObjectMetaValid: true, - CommonPrm: commonPrm, - Container: placement.container, - Key: key, - Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error { - return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac) - }, - } - return w.WriteObject(ctx, obj) -} - -type putSinglePlacement struct { - placementOptions []placement.Option - isEC bool - container containerSDK.Container - resetSuccessAfterOnBroadcast bool -} - -func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { - var result putSinglePlacement - - cnrID, ok := obj.ContainerID() - if !ok { - return result, errors.New("missing container ID") - } - cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) - if err != nil { - return result, fmt.Errorf("could not get container by ID: %w", err) - } - result.container = cnrInfo.Value - result.isEC = container.IsECContainer(cnrInfo.Value) && object.IsECSupported(obj) - if len(copiesNumber) > 0 && !result.isEC { - result.placementOptions = append(result.placementOptions, placement.WithCopyNumbers(copiesNumber)) - } - if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly { - result.placementOptions = append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1))) - result.resetSuccessAfterOnBroadcast = true - } - result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value)) - - objID, ok := obj.ID() - if !ok { - return result, errors.New("missing object ID") - } - if obj.ECHeader() != nil { - objID = obj.ECHeader().Parent() - } - result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - - latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) - if err != nil { - return result, fmt.Errorf("could not get latest network map: %w", err) - } - builder := placement.NewNetworkMapBuilder(latestNetmap) - if localOnly { - result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) - } - result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) - return result, nil -} - -func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object, - signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container, -) error { - if nodeDesc.Local { - return s.saveLocal(ctx, obj, meta, container) - } - - var info client.NodeInfo - - client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - - c, err := s.ClientConstructor.Get(info) - if err != nil { - return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) - } - - return s.redirectPutSingleRequest(ctx, signer, 
obj, info, c) -} - -func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { - localTarget := &objectwriter.LocalTarget{ - Storage: s.LocalStore, - Container: container, - } - return localTarget.WriteObject(ctx, obj, meta) -} - -func (s *Service) redirectPutSingleRequest(ctx context.Context, - signer *putSingleRequestSigner, - obj *objectSDK.Object, - info client.NodeInfo, - c client.MultiAddressClient, -) error { - ctx, span := tracing.StartSpanFromContext(ctx, "putService.redirectPutSingleRequest") - defer span.End() - - var req *objectAPI.PutSingleRequest - var firstErr error - req, firstErr = signer.GetRequestWithSignedHeader() - if firstErr != nil { - return firstErr - } - - info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { - ctx, span := tracing.StartSpanFromContext(ctx, "putService.redirectPutSingleRequest.IterateAddresses", - trace.WithAttributes( - attribute.String("address", addr.String()))) - defer span.End() - - var err error - - defer func() { - if err != nil { - objID, _ := obj.ID() - cnrID, _ := obj.ContainerID() - s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, - zap.Error(err), - zap.Stringer("address", addr), - zap.Stringer("object_id", objID), - zap.Stringer("container_id", cnrID), - ) - } - - stop = err == nil - if stop || firstErr == nil { - firstErr = err - } - }() - - var resp *objectAPI.PutSingleResponse - - err = c.RawForAddress(ctx, addr, func(cli *rawclient.Client) error { - var e error - resp, e = rpc.PutSingleObject(cli, req, rawclient.WithContext(ctx)) - return e - }) - if err != nil { - err = fmt.Errorf("failed to execute request: %w", err) - return - } - - if err = internal.VerifyResponseKeyV2(info.PublicKey(), resp); err != nil { - return - } - - err = signature.VerifyServiceMessage(resp) - if err != nil { - err = fmt.Errorf("response verification failed: %w", err) - return - } - - st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus()) - err = apistatus.ErrFromStatus(st) - - return - }) - - return firstErr -} diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go deleted file mode 100644 index 19768b7fa..000000000 --- a/pkg/services/object/put/streamer.go +++ /dev/null @@ -1,82 +0,0 @@ -package putsvc - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" -) - -type Streamer struct { - *objectwriter.Config - - target transformer.ChunkedObjectWriter -} - -var errNotInit = errors.New("stream not initialized") - -var errInitRecall = errors.New("init recall") - -func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { - if p.target != nil { - return errInitRecall - } - - // initialize destination target - prmTarget := objectwriter.Params{ - Config: p.Config, - Common: prm.common, - Header: prm.hdr, - Container: prm.cnr, - TraverseOpts: prm.traverseOpts, - Relay: prm.relay, - } - - var err error - p.target, err = target.New(ctx, prmTarget) - if err != nil { - return fmt.Errorf("(%T) could not initialize object target: %w", p, err) - } - - if err := p.target.WriteHeader(ctx, prm.hdr); err != nil { - return fmt.Errorf("(%T) could not write header to target: %w", p, err) - } - return nil -} - -func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error { - if 
p.target == nil { - return errNotInit - } - - if _, err := p.target.Write(ctx, prm.chunk); err != nil { - return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err) - } - - return nil -} - -func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) { - if p.target == nil { - return nil, errNotInit - } - - ids, err := p.target.Close(ctx) - if err != nil { - return nil, fmt.Errorf("(%T) could not close object target: %w", p, err) - } - - id := ids.ParentID - if id != nil { - return &PutResponse{ - id: *id, - }, nil - } - - return &PutResponse{ - id: ids.SelfID, - }, nil -} diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go deleted file mode 100644 index 78d4c711d..000000000 --- a/pkg/services/object/put/v2/service.go +++ /dev/null @@ -1,42 +0,0 @@ -package putsvc - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -// Service implements Put operation of Object service v2. -type Service struct { - svc *putsvc.Service - keyStorage *util.KeyStorage -} - -// NewService constructs Service instance from provided options. -func NewService(svc *putsvc.Service, ks *util.KeyStorage) *Service { - return &Service{ - svc: svc, - keyStorage: ks, - } -} - -// Put calls internal service and returns v2 object streamer. -func (s *Service) Put() (object.PutObjectStream, error) { - stream, err := s.svc.Put() - if err != nil { - return nil, fmt.Errorf("(%T) could not open object put stream: %w", s, err) - } - - return &streamer{ - stream: stream, - keyStorage: s.keyStorage, - }, nil -} - -func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest) (*objectAPI.PutSingleResponse, error) { - return s.svc.PutSingle(ctx, req) -} diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go deleted file mode 100644 index f0c648187..000000000 --- a/pkg/services/object/put/v2/streamer.go +++ /dev/null @@ -1,212 +0,0 @@ -package putsvc - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -type streamer struct { - stream *putsvc.Streamer - keyStorage *util.KeyStorage - saveChunks bool - init *object.PutRequest - chunks []*object.PutRequest - - *sizes // only for relay streams -} - -type sizes struct { - payloadSz uint64 // value from the header - - writtenPayload uint64 // sum 
size of already cached chunks
-}
-
-func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.Send")
-	defer span.End()
-
-	switch v := req.GetBody().GetObjectPart().(type) {
-	case *object.PutObjectPartInit:
-		var initPrm *putsvc.PutInitPrm
-
-		initPrm, err = s.toInitPrm(v, req)
-		if err != nil {
-			return err
-		}
-
-		if err = s.stream.Init(ctx, initPrm); err != nil {
-			err = fmt.Errorf("(%T) could not init object put stream: %w", s, err)
-		}
-
-		s.saveChunks = v.GetSignature() != nil
-		if s.saveChunks {
-			maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
-
-			s.sizes = &sizes{
-				payloadSz: v.GetHeader().GetPayloadLength(),
-			}
-
-			// check payload size limit overflow
-			if s.payloadSz > maxSz {
-				return target.ErrExceedingMaxSize
-			}
-
-			s.init = req
-		}
-	case *object.PutObjectPartChunk:
-		if s.saveChunks {
-			s.writtenPayload += uint64(len(v.GetChunk()))
-
-			// check payload size overflow
-			if s.writtenPayload > s.payloadSz {
-				return target.ErrWrongPayloadSize
-			}
-		}
-
-		if err = s.stream.SendChunk(ctx, toChunkPrm(v)); err != nil {
-			err = fmt.Errorf("(%T) could not send payload chunk: %w", s, err)
-		}
-
-		if s.saveChunks {
-			s.chunks = append(s.chunks, req)
-		}
-	default:
-		err = fmt.Errorf("(%T) invalid object put stream part type %T", s, v)
-	}
-
-	if err != nil || !s.saveChunks {
-		return
-	}
-
-	metaHdr := new(sessionV2.RequestMetaHeader)
-	meta := req.GetMetaHeader()
-
-	metaHdr.SetTTL(meta.GetTTL() - 1)
-	metaHdr.SetOrigin(meta)
-	req.SetMetaHeader(metaHdr)
-
-	// session token should not be used there
-	// otherwise remote nodes won't be able to
-	// process received part of split object
-	key, err := s.keyStorage.GetKey(nil)
-	if err != nil {
-		return err
-	}
-	return signature.SignServiceMessage(key, req)
-}
-
-func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.CloseAndRecv")
-	defer span.End()
-
-	if s.saveChunks {
-		// check payload size correctness
-		if s.writtenPayload != s.payloadSz {
-			return nil, target.ErrWrongPayloadSize
-		}
-	}
-
-	resp, err := s.stream.Close(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not close object put stream: %w", s, err)
-	}
-
-	return fromPutResponse(resp), nil
-}
-
-func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.relayRequest")
-	defer span.End()
-
-	// open stream
-	resp := new(object.PutResponse)
-
-	key := info.PublicKey()
-
-	var firstErr error
-
-	info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
-		ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.iterateAddress",
-			trace.WithAttributes(
-				attribute.String("address", addr.String()),
-			))
-		defer span.End()
-
-		var err error
-
-		defer func() {
-			stop = err == nil
-
-			if stop || firstErr == nil {
-				firstErr = err
-			}
-
-			// would be nice to log otherwise
-		}()
-
-		var stream *rpc.PutRequestWriter
-
-		err = c.RawForAddress(ctx, addr, func(cli *rawclient.Client) error {
-			stream, err = rpc.PutObject(cli, resp, rawclient.WithContext(ctx))
-			return err
-		})
-		if err != nil {
-			err = fmt.Errorf("stream opening failed: %w", err)
-			return
-		}
-
-		// send init part
-		err = stream.Write(s.init)
-		if err != nil {
-			internalclient.ReportError(c, err)
-			err = fmt.Errorf("sending the initial message to stream failed: %w", err)
-			return
-		}
-
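-		// replay the payload chunks cached by Send in their original order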
for i := range s.chunks { - if err = stream.Write(s.chunks[i]); err != nil { - internalclient.ReportError(c, err) - err = fmt.Errorf("sending the chunk %d failed: %w", i, err) - return - } - } - - // close object stream and receive response from remote node - err = stream.Close() - if err != nil { - err = fmt.Errorf("closing the stream failed: %w", err) - return - } - - // verify response key - if err = internal.VerifyResponseKeyV2(key, resp); err != nil { - return - } - - // verify response structure - err = signature.VerifyServiceMessage(resp) - if err != nil { - err = fmt.Errorf("response verification failed: %w", err) - } - - return - }) - - return firstErr -} diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go deleted file mode 100644 index 5ec9ebe10..000000000 --- a/pkg/services/object/put/v2/util.go +++ /dev/null @@ -1,47 +0,0 @@ -package putsvc - -import ( - putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -func (s *streamer) toInitPrm(part *objectV2.PutObjectPartInit, req *objectV2.PutRequest) (*putsvc.PutInitPrm, error) { - oV2 := new(objectV2.Object) - oV2.SetObjectID(part.GetObjectID()) - oV2.SetSignature(part.GetSignature()) - oV2.SetHeader(part.GetHeader()) - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - return new(putsvc.PutInitPrm). - WithObject( - objectSDK.NewFromV2(oV2), - ). - WithRelay(s.relayRequest). - WithCommonPrm(commonPrm). - WithCopyNumbers(part.GetCopiesNumber()), nil -} - -func toChunkPrm(req *objectV2.PutObjectPartChunk) *putsvc.PutChunkPrm { - return new(putsvc.PutChunkPrm). 
- WithChunk(req.GetChunk()) -} - -func fromPutResponse(r *putsvc.PutResponse) *objectV2.PutResponse { - var idV2 refsV2.ObjectID - r.ObjectID().WriteToV2(&idV2) - - body := new(objectV2.PutResponseBody) - body.SetObjectID(&idV2) - - resp := new(objectV2.PutResponse) - resp.SetBody(body) - - return resp -} diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go deleted file mode 100644 index 01eb1ea8d..000000000 --- a/pkg/services/object/qos.go +++ /dev/null @@ -1,145 +0,0 @@ -package object - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" -) - -var _ ServiceServer = (*qosObjectService)(nil) - -type AdjustIOTag interface { - AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context -} - -type qosObjectService struct { - next ServiceServer - adj AdjustIOTag -} - -func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer { - return &qosObjectService{ - next: next, - adj: adjIOTag, - } -} - -func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Delete(ctx, req) -} - -func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Get(req, &qosReadStream[*object.GetResponse]{ - ctxF: func() context.Context { return ctx }, - sender: s, - }) -} - -func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{ - ctxF: func() context.Context { return ctx }, - sender: s, - }) -} - -func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.GetRangeHash(ctx, req) -} - -func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Head(ctx, req) -} - -func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) { - s, err := q.next.Patch(ctx) - if err != nil { - return nil, err - } - return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{ - s: s, - adj: q.adj, - }, nil -} - -func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) { - s, err := q.next.Put(ctx) - if err != nil { - return nil, err - } - return &qosWriteStream[*object.PutRequest, *object.PutResponse]{ - s: s, - adj: q.adj, - }, nil -} - -func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.PutSingle(ctx, req) -} - -func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), 
req.GetVerificationHeader().GetBodySignature().GetKey())
-	return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
-		ctxF:   func() context.Context { return ctx },
-		sender: s,
-	})
-}
-
-type qosSend[T any] interface {
-	Send(T) error
-}
-
-type qosReadStream[T any] struct {
-	sender qosSend[T]
-	ctxF   func() context.Context
-}
-
-func (g *qosReadStream[T]) Context() context.Context {
-	return g.ctxF()
-}
-
-func (g *qosReadStream[T]) Send(resp T) error {
-	return g.sender.Send(resp)
-}
-
-type qosVerificationHeader interface {
-	GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
-	Send(context.Context, TReq) error
-	CloseAndRecv(context.Context) (TResp, error)
-}
-
-type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
-	s   qosSendRecv[TReq, TResp]
-	adj AdjustIOTag
-
-	ioTag        string
-	ioTagDefined bool
-}
-
-func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
-	if q.ioTagDefined {
-		ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
-	}
-	return q.s.CloseAndRecv(ctx)
-}
-
-func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
-	if !q.ioTagDefined {
-		ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
-		q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
-	}
-	assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
-	ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
-	return q.s.Send(ctx, req)
-}
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
deleted file mode 100644
index bc6ffd160..000000000
--- a/pkg/services/object/remote_reader.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package object
-
-import (
-	"context"
-	"fmt"
-
-	clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-	netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
-	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type ClientConstructor interface {
-	Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
-}
-
-// RemoteReader represents a utility for getting
-// the object from a remote host.
-type RemoteReader struct {
-	keyStorage *util.KeyStorage
-
-	clientCache ClientConstructor
-}
-
-// RemoteRequestPrm groups remote operation parameters.
-type RemoteRequestPrm struct {
-	addr oid.Address
-	raw  bool
-	node netmap.NodeInfo
-}
-
-const remoteOpTTL = 1
-
-// NewRemoteReader creates, initializes and returns a new RemoteReader instance.
-func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
-	return &RemoteReader{
-		keyStorage:  keyStorage,
-		clientCache: cache,
-	}
-}
-
-// WithNodeInfo sets information about the remote node.
-func (p *RemoteRequestPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteRequestPrm {
-	if p != nil {
-		p.node = v
-	}
-
-	return p
-}
-
-// WithObjectAddress sets the object address.
-func (p *RemoteRequestPrm) WithObjectAddress(v oid.Address) *RemoteRequestPrm {
-	if p != nil {
-		p.addr = v
-	}
-
-	return p
-}
-
-func (p *RemoteRequestPrm) WithRaw(v bool) *RemoteRequestPrm {
-	if p != nil {
-		p.raw = v
-	}
-	return p
-}
-
-// Head requests the object header from the remote node.
-func (h *RemoteReader) Head(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
-	key, err := h.keyStorage.GetKey(nil)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
-	}
-
-	var info clientcore.NodeInfo
-
-	err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
-	if err != nil {
-		return nil, fmt.Errorf("parse client node info: %w", err)
-	}
-
-	c, err := h.clientCache.Get(info)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
-	}
-
-	var headPrm internalclient.HeadObjectPrm
-
-	headPrm.SetClient(c)
-	headPrm.SetPrivateKey(key)
-	headPrm.SetAddress(prm.addr)
-	headPrm.SetTTL(remoteOpTTL)
-	if prm.raw {
-		headPrm.SetRawFlag()
-	}
-
-	res, err := internalclient.HeadObject(ctx, headPrm)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
-	}
-
-	return res.Header(), nil
-}
-
-func (h *RemoteReader) Get(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
-	key, err := h.keyStorage.GetKey(nil)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
-	}
-
-	var info clientcore.NodeInfo
-
-	err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
-	if err != nil {
-		return nil, fmt.Errorf("parse client node info: %w", err)
-	}
-
-	c, err := h.clientCache.Get(info)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
-	}
-
-	var getPrm internalclient.GetObjectPrm
-
-	getPrm.SetClient(c)
-	getPrm.SetPrivateKey(key)
-	getPrm.SetAddress(prm.addr)
-	getPrm.SetTTL(remoteOpTTL)
-	if prm.raw {
-		getPrm.SetRawFlag()
-	}
-
-	res, err := internalclient.GetObject(ctx, getPrm)
-	if err != nil {
-		return nil, fmt.Errorf("(%T) could not get object from %s: %w", h, info.AddressGroup(), err)
-	}
-
-	return res.Object(), nil
-}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
deleted file mode 100644
index 80c971e8f..000000000
--- a/pkg/services/object/response.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package object
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
-)
-
-type ResponseService struct {
-	respSvc *response.Service
-
-	svc ServiceServer
-}
-
-type searchStreamResponser struct {
-	SearchStream
-
-	respSvc *response.Service
-}
-
-type getStreamResponser struct {
-	GetObjectStream
-
-	respSvc *response.Service
-}
-
-type getRangeStreamResponser struct {
-	GetObjectRangeStream
-
-	respSvc *response.Service
-}
-
-type putStreamResponser struct {
-	stream  PutObjectStream
-	respSvc *response.Service
-}
-
-type patchStreamResponser struct {
-	stream  PatchObjectStream
-	respSvc *response.Service
-}
-
-// NewResponseService returns an object service instance that passes internal
-// service calls to the response service.
-func NewResponseService(objSvc ServiceServer, respSvc *response.Service) *ResponseService {
-	return &ResponseService{
-		respSvc: respSvc,
-		svc:     objSvc,
-	}
-}
-
-func (s *getStreamResponser) Send(resp *object.GetResponse) error {
-	s.respSvc.SetMeta(resp)
-	return s.GetObjectStream.Send(resp)
-}
-
-func (s *ResponseService) Get(req *object.GetRequest, stream GetObjectStream) error {
-	return s.svc.Get(req, &getStreamResponser{
-		GetObjectStream: stream,
-		respSvc:         s.respSvc,
-	})
-}
-
-func (s *putStreamResponser) Send(ctx context.Context, req *object.PutRequest) error {
-	if err := s.stream.Send(ctx, req); err != nil {
-		return fmt.Errorf("could not send the request: %w", err)
-	}
-	return nil
-}
-
-func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
-	r, err := s.stream.CloseAndRecv(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not close stream and receive response: %w", err)
-	}
-
-	s.respSvc.SetMeta(r)
-	return r, nil
-}
-
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
-	stream, err := s.svc.Put(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
-	}
-
-	return &putStreamResponser{
-		stream:  stream,
-		respSvc: s.respSvc,
-	}, nil
-}
-
-func (s *patchStreamResponser) Send(ctx context.Context, req *object.PatchRequest) error {
-	if err := s.stream.Send(ctx, req); err != nil {
-		return fmt.Errorf("could not send the request: %w", err)
-	}
-	return nil
-}
-
-func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
-	r, err := s.stream.CloseAndRecv(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not close stream and receive response: %w", err)
-	}
-
-	s.respSvc.SetMeta(r)
-	return r, nil
-}
-
-func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
-	stream, err := s.svc.Patch(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
-	}
-
-	return &patchStreamResponser{
-		stream:  stream,
-		respSvc: s.respSvc,
-	}, nil
-}
-
-func (s *ResponseService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
-	resp, err := s.svc.PutSingle(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	s.respSvc.SetMeta(resp)
-	return resp, nil
-}
-
-func (s *ResponseService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
-	resp, err := s.svc.Head(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	s.respSvc.SetMeta(resp)
-	return resp, nil
-}
-
-func (s *searchStreamResponser) Send(resp *object.SearchResponse) error {
-	s.respSvc.SetMeta(resp)
-	return s.SearchStream.Send(resp)
-}
-
-func (s *ResponseService) Search(req *object.SearchRequest, stream SearchStream) error {
-	return s.svc.Search(req, &searchStreamResponser{
-		SearchStream: stream,
-		respSvc:      s.respSvc,
-	})
-}
-
-func (s *ResponseService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
-	resp, err := s.svc.Delete(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	s.respSvc.SetMeta(resp)
-	return resp, nil
-}
-
-func (s *getRangeStreamResponser) Send(resp *object.GetRangeResponse) error {
-	s.respSvc.SetMeta(resp)
-	return s.GetObjectRangeStream.Send(resp)
-}
-
-func (s *ResponseService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
-	return s.svc.GetRange(req, &getRangeStreamResponser{
-		GetObjectRangeStream: stream,
-		respSvc:              s.respSvc,
-	})
-}
-
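// Every unary handler in ResponseService follows the same decorator shape:
// delegate to the wrapped ServiceServer, then stamp response meta via
// s.respSvc.SetMeta before returning. A minimal sketch of the pattern
// (the method name T is illustrative, not from the source):
//
//	func (s *ResponseService) T(ctx context.Context, req *object.TRequest) (*object.TResponse, error) {
//		resp, err := s.svc.T(ctx, req)
//		if err != nil {
//			return nil, err
//		}
//		s.respSvc.SetMeta(resp)
//		return resp, nil
//	}
//
// GetRangeHash below is the last of these wrappers.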
-func (s *ResponseService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - resp, err := s.svc.GetRangeHash(ctx, req) - if err != nil { - return nil, err - } - - s.respSvc.SetMeta(resp) - return resp, nil -} diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go deleted file mode 100644 index 60d469b11..000000000 --- a/pkg/services/object/search/container.go +++ /dev/null @@ -1,124 +0,0 @@ -package searchsvc - -import ( - "context" - "encoding/hex" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "go.uber.org/zap" -) - -func (exec *execCtx) executeOnContainer(ctx context.Context) error { - lookupDepth := exec.netmapLookupDepth() - - exec.log.Debug(ctx, logs.TryingToExecuteInContainer, - zap.Uint64("netmap lookup depth", lookupDepth), - ) - - // initialize epoch number - if err := exec.initEpoch(ctx); err != nil { - return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err) - } - - for { - if err := exec.processCurrentEpoch(ctx); err != nil { - break - } - - // check the maximum depth has been reached - if lookupDepth == 0 { - break - } - - lookupDepth-- - - // go to the previous epoch - exec.curProcEpoch-- - } - - return nil -} - -func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { - exec.log.Debug(ctx, logs.ProcessEpoch, - zap.Uint64("number", exec.curProcEpoch), - ) - - traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch) - if err != nil { - return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err) - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - for { - addrs := traverser.Next() - if len(addrs) == 0 { - exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) - break - } - - var wg sync.WaitGroup - var mtx sync.Mutex - - for i := range addrs { - wg.Add(1) - go func(i int) { - defer wg.Done() - select { - case <-ctx.Done(): - exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext, - zap.Error(ctx.Err())) - return - default: - } - - var info client.NodeInfo - - client.NodeInfoFromNetmapElement(&info, addrs[i]) - - exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) - - c, err := exec.svc.clientConstructor.get(info) - if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err)) - return - } - - ids, err := c.searchObjects(ctx, exec, info) - if err != nil { - exec.log.Debug(ctx, logs.SearchRemoteOperationFailed, - zap.Error(err)) - - return - } - - mtx.Lock() - err = exec.writeIDList(ids) - mtx.Unlock() - if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err)) - return - } - }(i) - } - - wg.Wait() - } - - return nil -} - -func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) { - cnrID := exec.containerID() - cnr, err := exec.svc.containerSource.Get(ctx, cnrID) - if err != nil { - return containerSDK.Container{}, err - } - return cnr.Value, nil -} diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go deleted file mode 100644 index ced51ecce..000000000 --- a/pkg/services/object/search/exec.go +++ /dev/null @@ -1,85 +0,0 @@ -package searchsvc - -import ( - "context" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type execCtx struct { - svc *Service - - prm Prm - - log *logger.Logger - - curProcEpoch uint64 -} - -func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = l.With( - zap.String("request", "SEARCH"), - zap.Stringer("container", exec.containerID()), - zap.Bool("local", exec.isLocal()), - zap.Bool("with session", exec.prm.common.SessionToken() != nil), - zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - ) -} - -func (exec *execCtx) isLocal() bool { - return exec.prm.common.LocalOnly() -} - -func (exec *execCtx) containerID() cid.ID { - return exec.prm.cnr -} - -func (exec *execCtx) searchFilters() objectSDK.SearchFilters { - return exec.prm.filters -} - -func (exec *execCtx) netmapEpoch() uint64 { - return exec.prm.common.NetmapEpoch() -} - -func (exec *execCtx) netmapLookupDepth() uint64 { - return exec.prm.common.NetmapLookupDepth() -} - -func (exec *execCtx) initEpoch(ctx context.Context) error { - exec.curProcEpoch = exec.netmapEpoch() - if exec.curProcEpoch > 0 { - return nil - } - - e, err := exec.svc.currentEpochReceiver.Epoch(ctx) - if err != nil { - return err - } - - exec.curProcEpoch = e - return nil -} - -func (exec *execCtx) writeIDList(ids []oid.ID) error { - ids = exec.filterAllowedObjectIDs(ids) - return exec.prm.writer.WriteIDs(ids) -} - -func (exec *execCtx) filterAllowedObjectIDs(objIDs []oid.ID) []oid.ID { - sessionToken := exec.prm.common.SessionToken() - if sessionToken == nil { - return objIDs - } - result := make([]oid.ID, 0, len(objIDs)) - for _, objID := range objIDs { - if sessionToken.AssertObject(objID) { - result = append(result, objID) - } - } - return result -} diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go deleted file mode 100644 index ec65ab06a..000000000 --- a/pkg/services/object/search/local.go +++ /dev/null @@ -1,23 +0,0 @@ -package searchsvc - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "go.uber.org/zap" -) - -func (exec *execCtx) executeLocal(ctx context.Context) error { - ids, err := exec.svc.localStorage.search(ctx, exec) - if err != nil { - exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err)) - return err - } - - if err := exec.writeIDList(ids); err != nil { - return fmt.Errorf("%s: %w", logs.SearchCouldNotWriteObjectIdentifiers, err) - } - - return nil -} diff --git a/pkg/services/object/search/prm.go b/pkg/services/object/search/prm.go deleted file mode 100644 index 95fe82e2f..000000000 --- a/pkg/services/object/search/prm.go +++ /dev/null @@ -1,60 +0,0 @@ -package searchsvc - -import ( - "context" - - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Prm groups parameters of Get service call. -type Prm struct { - writer *uniqueIDWriter - - common *util.CommonPrm - - cnr cid.ID - - filters objectSDK.SearchFilters - - forwarder RequestForwarder -} - -// IDListWriter is an interface of target component -// to write list of object identifiers. 
-type IDListWriter interface { - WriteIDs([]oid.ID) error -} - -// RequestForwarder is a callback for forwarding of the -// original Search requests. -type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) ([]oid.ID, error) - -// SetCommonParameters sets common parameters of the operation. -func (p *Prm) SetCommonParameters(common *util.CommonPrm) { - p.common = common -} - -// SetWriter sets target component to write list of object identifiers. -func (p *Prm) SetWriter(w IDListWriter) { - p.writer = newUniqueAddressWriter(w) -} - -// SetRequestForwarder sets callback for forwarding -// of the original request. -func (p *Prm) SetRequestForwarder(f RequestForwarder) { - p.forwarder = f -} - -// WithContainerID sets identifier of the container to search the objects. -func (p *Prm) WithContainerID(id cid.ID) { - p.cnr = id -} - -// WithSearchFilters sets search filters. -func (p *Prm) WithSearchFilters(fs objectSDK.SearchFilters) { - p.filters = fs -} diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go deleted file mode 100644 index 76c091f85..000000000 --- a/pkg/services/object/search/search.go +++ /dev/null @@ -1,45 +0,0 @@ -package searchsvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "go.uber.org/zap" -) - -// Search serves a request to select the objects. -func (s *Service) Search(ctx context.Context, prm Prm) error { - exec := &execCtx{ - svc: s, - prm: prm, - } - - exec.setLogger(s.log) - - return exec.execute(ctx) -} - -func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(ctx, logs.ServingRequest) - - err := exec.executeLocal(ctx) - exec.logResult(ctx, err) - - if exec.isLocal() { - exec.log.Debug(ctx, logs.SearchReturnResultDirectly) - return err - } - - err = exec.executeOnContainer(ctx) - exec.logResult(ctx, err) - return err -} - -func (exec *execCtx) logResult(ctx context.Context, err error) { - switch { - default: - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) - case err == nil: - exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) - } -} diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go deleted file mode 100644 index 918ad421f..000000000 --- a/pkg/services/object/search/search_test.go +++ /dev/null @@ -1,626 +0,0 @@ -package searchsvc - -import ( - "context" - "crypto/rand" - "crypto/sha256" - "errors" - "fmt" - "slices" - "strconv" - "testing" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type idsErr struct { - ids []oid.ID - err error -} - -type testStorage struct { - items map[string]idsErr -} - -type testTraverserGenerator struct { - c container.Container - b map[uint64]placement.Builder -} - -type testPlacementBuilder struct { - vectors map[string][][]netmap.NodeInfo -} - -type testClientCache struct { - clients map[string]*testStorage -} - -type simpleIDWriter struct { - ids []oid.ID -} - -type testEpochReceiver uint64 - -func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { - return uint64(e), nil -} - -type errIDWriter struct { - err error -} - -func (e errIDWriter) WriteIDs(ids []oid.ID) error { - return e.err -} - -func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error { - s.ids = append(s.ids, ids...) - return nil -} - -func newTestStorage() *testStorage { - return &testStorage{ - items: make(map[string]idsErr), - } -} - -func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { - t, err := placement.NewTraverser(context.Background(), - placement.ForContainer(g.c), - placement.UseBuilder(g.b[epoch]), - placement.WithoutSuccessTracking(), - ) - return t, &containerCore.Container{Value: g.c}, err -} - -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { - var addr oid.Address - addr.SetContainer(cnr) - - if obj != nil { - addr.SetObject(*obj) - } - - vs, ok := p.vectors[addr.EncodeToString()] - if !ok { - return nil, errors.New("vectors for address not found") - } - - res := slices.Clone(vs) - - return res, nil -} - -func (c *testClientCache) get(info clientcore.NodeInfo) (searchClient, error) { - v, ok := c.clients[network.StringifyGroup(info.AddressGroup())] - if !ok { - return nil, errors.New("could not construct client") - } - - return v, nil -} - -func (s *testStorage) search(_ context.Context, exec *execCtx) ([]oid.ID, error) { - v, ok := s.items[exec.containerID().EncodeToString()] - if !ok { - return nil, nil - } - - return v.ids, v.err -} - -func (c *testStorage) searchObjects(_ context.Context, exec *execCtx, _ clientcore.NodeInfo) ([]oid.ID, error) { - v, ok := c.items[exec.containerID().EncodeToString()] - if !ok { - return nil, nil - } - - return v.ids, v.err -} - -func (c *testStorage) addResult(addr cid.ID, ids []oid.ID, err error) { - c.items[addr.EncodeToString()] = idsErr{ - ids: ids, - err: err, - } -} - -func testSHA256() (cs [sha256.Size]byte) { - rand.Read(cs[:]) - return cs -} - -func generateIDs(num int) []oid.ID { - res := make([]oid.ID, num) - - for i := range num { - res[i].SetSHA256(testSHA256()) - } - - return res -} - -func TestGetLocalOnly(t *testing.T) { - ctx := context.Background() - - newSvc := func(storage *testStorage) *Service { - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t) - svc.localStorage = storage - - return svc - } - - newPrm := func(cnr cid.ID, w IDListWriter) Prm { - p := Prm{} - p.WithContainerID(cnr) - p.SetWriter(w) - p.common = new(util.CommonPrm).WithLocalOnly(true) - - return p - } - - t.Run("OK", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - cnr := cidtest.ID() - ids := generateIDs(10) - 
storage.addResult(cnr, ids, nil) - - w := new(simpleIDWriter) - p := newPrm(cnr, w) - - err := svc.Search(ctx, p) - require.NoError(t, err) - require.Equal(t, ids, w.ids) - }) - - t.Run("FAIL", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - cnr := cidtest.ID() - testErr := errors.New("any error") - storage.addResult(cnr, nil, testErr) - - w := new(simpleIDWriter) - p := newPrm(cnr, w) - - err := svc.Search(ctx, p) - require.ErrorIs(t, err, testErr) - }) - t.Run("FAIL while writing ID", func(t *testing.T) { - storage := newTestStorage() - svc := newSvc(storage) - - cnr := cidtest.ID() - storage.addResult(cnr, []oid.ID{oidtest.ID()}, nil) - - testErr := errors.New("any error") - w := errIDWriter{testErr} - p := newPrm(cnr, w) - - err := svc.Search(ctx, p) - require.ErrorIs(t, err, testErr) - }) -} - -func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { - mNodes := make([][]netmap.NodeInfo, len(dim)) - mAddr := make([][]string, len(dim)) - - for i := range dim { - ns := make([]netmap.NodeInfo, dim[i]) - as := make([]string, dim[i]) - - for j := range dim[i] { - a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", - strconv.Itoa(i), - strconv.Itoa(60000+j), - ) - - var ni netmap.NodeInfo - ni.SetNetworkEndpoints(a) - - var na network.AddressGroup - - err := na.FromIterator(netmapcore.Node(ni)) - require.NoError(t, err) - - as[j] = network.StringifyGroup(na) - - ns[j] = ni - } - - mNodes[i] = ns - mAddr[i] = as - } - - return mNodes, mAddr -} - -func TestGetRemoteSmall(t *testing.T) { - ctx := context.Background() - - placementDim := []int{2} - - rs := make([]netmap.ReplicaDescriptor, len(placementDim)) - for i := range placementDim { - rs[i].SetNumberOfObjects(uint32(placementDim[i])) - } - - var pp netmap.PlacementPolicy - pp.AddReplicas(rs...) - - var cnr container.Container - cnr.SetPlacementPolicy(pp) - - var id cid.ID - container.CalculateID(&id, cnr) - - newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service { - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t) - svc.localStorage = newTestStorage() - - const curEpoch = 13 - - svc.traverserGenerator = &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: b, - }, - } - svc.clientConstructor = c - svc.currentEpochReceiver = testEpochReceiver(curEpoch) - - return svc - } - - newPrm := func(id cid.ID, w IDListWriter) Prm { - p := Prm{} - p.WithContainerID(id) - p.SetWriter(w) - p.common = new(util.CommonPrm).WithLocalOnly(false) - - return p - } - - var addr oid.Address - addr.SetContainer(id) - - ns, as := testNodeMatrix(t, placementDim) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - c1 := newTestStorage() - ids1 := generateIDs(10) - - c2 := newTestStorage() - ids2 := generateIDs(10) - - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testStorage{ - as[0][0]: c1, - as[0][1]: c2, - }, - }) - - t.Run("OK", func(t *testing.T) { - c1.addResult(id, ids1, nil) - c2.addResult(id, ids2, nil) - - w := new(simpleIDWriter) - - p := newPrm(id, w) - - err := svc.Search(ctx, p) - require.NoError(t, err) - require.Len(t, w.ids, len(ids1)+len(ids2)) - - for _, id := range append(ids1, ids2...) 
{ - require.Contains(t, w.ids, id) - } - }) - t.Run("non-local fail is not a FAIL", func(t *testing.T) { - testErr := errors.New("opaque") - - c1.addResult(id, ids1, nil) - c2.addResult(id, nil, testErr) - - w := new(simpleIDWriter) - p := newPrm(id, w) - - err := svc.Search(ctx, p) - require.NoError(t, err) - require.Equal(t, ids1, w.ids) - }) - t.Run("client init fail is not a FAIL", func(t *testing.T) { - svc := newSvc(builder, &testClientCache{ - clients: map[string]*testStorage{ - as[0][0]: c1, - }, - }) - c1.addResult(id, ids1, nil) - c2.addResult(id, ids2, nil) - - w := new(simpleIDWriter) - p := newPrm(id, w) - - err := svc.Search(ctx, p) - require.NoError(t, err) - require.Equal(t, ids1, w.ids) - }) - t.Run("context is respected", func(t *testing.T) { - c1.addResult(id, ids1, nil) - c2.addResult(id, ids2, nil) - - w := new(simpleIDWriter) - p := newPrm(id, w) - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - err := svc.Search(ctx, p) - require.NoError(t, err) - require.Empty(t, w.ids) - }) -} - -func TestGetFromPastEpoch(t *testing.T) { - ctx := context.Background() - - placementDim := []int{2, 2} - - rs := make([]netmap.ReplicaDescriptor, len(placementDim)) - - for i := range placementDim { - rs[i].SetNumberOfObjects(uint32(placementDim[i])) - } - - var pp netmap.PlacementPolicy - pp.AddReplicas(rs...) - - var cnr container.Container - cnr.SetPlacementPolicy(pp) - - var idCnr cid.ID - container.CalculateID(&idCnr, cnr) - - var addr oid.Address - addr.SetContainer(idCnr) - - ns, as := testNodeMatrix(t, placementDim) - - c11 := newTestStorage() - ids11 := generateIDs(10) - c11.addResult(idCnr, ids11, nil) - - c12 := newTestStorage() - ids12 := generateIDs(10) - c12.addResult(idCnr, ids12, nil) - - c21 := newTestStorage() - ids21 := generateIDs(10) - c21.addResult(idCnr, ids21, nil) - - c22 := newTestStorage() - ids22 := generateIDs(10) - c22.addResult(idCnr, ids22, nil) - - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t) - svc.localStorage = newTestStorage() - - const curEpoch = 13 - - svc.traverserGenerator = &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[:1], - }, - }, - curEpoch - 1: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[1:], - }, - }, - }, - } - - svc.clientConstructor = &testClientCache{ - clients: map[string]*testStorage{ - as[0][0]: c11, - as[0][1]: c12, - as[1][0]: c21, - as[1][1]: c22, - }, - } - - svc.currentEpochReceiver = testEpochReceiver(curEpoch) - - w := new(simpleIDWriter) - - p := Prm{} - p.WithContainerID(idCnr) - p.SetWriter(w) - - commonPrm := new(util.CommonPrm) - p.SetCommonParameters(commonPrm) - - assertContains := func(idsList ...[]oid.ID) { - var sz int - - for _, ids := range idsList { - sz += len(ids) - - for _, id := range ids { - require.Contains(t, w.ids, id) - } - } - - require.Len(t, w.ids, sz) - } - - err := svc.Search(ctx, p) - require.NoError(t, err) - assertContains(ids11, ids12) - - commonPrm.SetNetmapLookupDepth(1) - w = new(simpleIDWriter) - p.SetWriter(w) - - err = svc.Search(ctx, p) - require.NoError(t, err) - assertContains(ids11, ids12, ids21, ids22) -} - -func TestGetWithSessionToken(t *testing.T) { - ctx := context.Background() - - placementDim := []int{2} - - rs := make([]netmap.ReplicaDescriptor, len(placementDim)) - for i := range placementDim { - rs[i].SetNumberOfObjects(uint32(placementDim[i])) - } - - var pp 
netmap.PlacementPolicy - pp.AddReplicas(rs...) - - var cnr container.Container - cnr.SetPlacementPolicy(pp) - - var id cid.ID - container.CalculateID(&id, cnr) - - var addr oid.Address - addr.SetContainer(id) - - ns, as := testNodeMatrix(t, placementDim) - - builder := &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns, - }, - } - - localStorage := newTestStorage() - localIDs := generateIDs(10) - localStorage.addResult(id, localIDs, nil) - - c1 := newTestStorage() - ids1 := generateIDs(10) - c1.addResult(id, ids1, nil) - - c2 := newTestStorage() - ids2 := generateIDs(10) - c2.addResult(id, ids2, nil) - - w := new(simpleIDWriter) - - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t) - svc.localStorage = localStorage - - const curEpoch = 13 - - svc.traverserGenerator = &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: builder, - }, - } - svc.clientConstructor = &testClientCache{ - clients: map[string]*testStorage{ - as[0][0]: c1, - as[0][1]: c2, - }, - } - - svc.currentEpochReceiver = testEpochReceiver(curEpoch) - - metaStub := &metaStub{ - TTL: 5, - LimitByObjectIDs: append(append(localIDs[:5], ids1[:5]...), ids2[:5]...), - T: t, - Exp: 20, - ContainerID: id, - } - - p := Prm{} - p.WithContainerID(id) - p.SetWriter(w) - var err error - p.common, err = util.CommonPrmFromV2(metaStub) - require.NoError(t, err) - - err = svc.Search(ctx, p) - require.NoError(t, err) - require.Len(t, w.ids, 15) - - for _, id := range metaStub.LimitByObjectIDs { - require.Contains(t, w.ids, id) - } -} - -type metaStub struct { - TTL uint32 - Exp uint64 - LimitByObjectIDs []oid.ID - T *testing.T - ContainerID cid.ID -} - -func (m *metaStub) GetMetaHeader() *session.RequestMetaHeader { - var result session.RequestMetaHeader - result.SetTTL(m.TTL) - - tokenObj := new(sessionsdk.Object) - tokenObj.ForVerb(sessionsdk.VerbObjectSearch) - tokenObj.LimitByObjects(m.LimitByObjectIDs...) - tokenObj.SetID(uuid.New()) - tokenObj.SetExp(m.Exp) - tokenObj.BindContainer(m.ContainerID) - - pubKey := &frostfsecdsa.PublicKey{} - - tokenObj.SetAuthKey(pubKey) - - priv, err := keys.NewPrivateKey() - require.NoError(m.T, err) - - require.NoError(m.T, tokenObj.Sign(priv.PrivateKey)) - - var token session.Token - tokenObj.WriteToV2(&token) - - result.SetSessionToken(&token) - - return &result -} diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go deleted file mode 100644 index 56fe56468..000000000 --- a/pkg/services/object/search/service.go +++ /dev/null @@ -1,99 +0,0 @@ -package searchsvc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -// Service is an utility serving requests -// of Object.Search service. -type Service struct { - *cfg -} - -// Option is a Service's constructor option. -type Option func(*cfg) - -type searchClient interface { - // searchObjects searches objects on the specified node. 
-	// MUST NOT modify execCtx as it can be accessed concurrently.
-	searchObjects(context.Context, *execCtx, client.NodeInfo) ([]oid.ID, error)
-}
-
-type ClientConstructor interface {
-	Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-type cfg struct {
-	log *logger.Logger
-
-	localStorage interface {
-		search(context.Context, *execCtx) ([]oid.ID, error)
-	}
-
-	clientConstructor interface {
-		get(client.NodeInfo) (searchClient, error)
-	}
-
-	traverserGenerator interface {
-		GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
-	}
-
-	currentEpochReceiver interface {
-		Epoch(ctx context.Context) (uint64, error)
-	}
-
-	keyStore *util.KeyStorage
-
-	containerSource container.Source
-}
-
-// New creates, initializes and returns a utility serving
-// Object.Search service requests.
-func New(e *engine.StorageEngine,
-	cc ClientConstructor,
-	tg *util.TraverserGenerator,
-	ns netmap.Source,
-	ks *util.KeyStorage,
-	cs container.Source,
-	opts ...Option,
-) *Service {
-	c := &cfg{
-		log: logger.NewLoggerWrapper(zap.L()),
-		clientConstructor: &clientConstructorWrapper{
-			constructor: cc,
-		},
-		localStorage: &storageEngineWrapper{
-			storage: e,
-		},
-		traverserGenerator:   tg,
-		currentEpochReceiver: ns,
-		keyStore:             ks,
-		containerSource:      cs,
-	}
-
-	for i := range opts {
-		opts[i](c)
-	}
-
-	return &Service{
-		cfg: c,
-	}
-}
-
-// WithLogger returns an option to specify the Search service's logger.
-func WithLogger(l *logger.Logger) Option {
-	return func(c *cfg) {
-		c.log = l
-	}
-}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
deleted file mode 100644
index 0be5345b9..000000000
--- a/pkg/services/object/search/util.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package searchsvc
-
-import (
-	"context"
-	"slices"
-	"sync"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type uniqueIDWriter struct {
-	mtx sync.Mutex
-
-	written map[oid.ID]struct{}
-
-	writer IDListWriter
-}
-
-type clientConstructorWrapper struct {
-	constructor ClientConstructor
-}
-
-type clientWrapper struct {
-	client client.MultiAddressClient
-}
-
-type storageEngineWrapper struct {
-	storage *engine.StorageEngine
-}
-
-func newUniqueAddressWriter(w IDListWriter) *uniqueIDWriter {
-	if w, ok := w.(*uniqueIDWriter); ok {
-		return w
-	}
-	return &uniqueIDWriter{
-		written: make(map[oid.ID]struct{}),
-		writer:  w,
-	}
-}
-
-func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
-	w.mtx.Lock()
-
-	for i := 0; i < len(list); i++ { // don't use range, the slice is mutated in the loop body
-		if _, ok := w.written[list[i]]; !ok {
-			// mark address as processed
-			w.written[list[i]] = struct{}{}
-			continue
-		}
-
-		// exclude processed address
-		list = slices.Delete(list, i, i+1)
-		i--
-	}
-
-	w.mtx.Unlock()
-
-	return w.writer.WriteIDs(list)
-}
-
-func (c *clientConstructorWrapper) get(info client.NodeInfo) (searchClient, error) {
-	clt, err := c.constructor.Get(info)
-	if err != nil {
-		return nil, err
-	}
-
-	return &clientWrapper{
-		client: clt,
-	}, nil
-}
-
-func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info client.NodeInfo) ([]oid.ID,
error) { - if exec.prm.forwarder != nil { - return exec.prm.forwarder(ctx, info, c.client) - } - - var sessionInfo *util.SessionInfo - - if tok := exec.prm.common.SessionToken(); tok != nil { - sessionInfo = &util.SessionInfo{ - ID: tok.ID(), - Owner: tok.Issuer(), - } - } - - key, err := exec.svc.keyStore.GetKey(sessionInfo) - if err != nil { - return nil, err - } - - var prm internalclient.SearchObjectsPrm - - prm.SetClient(c.client) - prm.SetPrivateKey(key) - prm.SetSessionToken(exec.prm.common.SessionToken()) - prm.SetBearerToken(exec.prm.common.BearerToken()) - prm.SetTTL(exec.prm.common.TTL()) - prm.SetXHeaders(exec.prm.common.XHeaders()) - prm.SetNetmapEpoch(exec.curProcEpoch) - prm.SetContainerID(exec.containerID()) - prm.SetFilters(exec.searchFilters()) - - res, err := internalclient.SearchObjects(ctx, prm) - if err != nil { - return nil, err - } - - return res.IDList(), nil -} - -func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - cnr, err := exec.getContainer(ctx) - if err != nil { - return nil, err - } - var selectPrm engine.SelectPrm - selectPrm.WithFilters(exec.searchFilters()) - selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr)) - - r, err := e.storage.Select(ctx, selectPrm) - if err != nil { - return nil, err - } - - return idsFromAddresses(r.AddressList()), nil -} - -func idsFromAddresses(addrs []oid.Address) []oid.ID { - ids := make([]oid.ID, len(addrs)) - - for i := range addrs { - ids[i] = addrs[i].Object() - } - - return ids -} diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go deleted file mode 100644 index 7bb6e4d3c..000000000 --- a/pkg/services/object/search/v2/request_forwarder.go +++ /dev/null @@ -1,99 +0,0 @@ -package searchsvc - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "io" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type requestForwarder struct { - OnceResign sync.Once - Request *objectV2.SearchRequest - Key *ecdsa.PrivateKey -} - -func (f *requestForwarder) forwardRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) ([]oid.ID, error) { - var err error - - // once compose and resign forwarding request - f.OnceResign.Do(func() { - // compose meta header of the local server - metaHdr := new(session.RequestMetaHeader) - metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1) - // TODO: #1165 think how to set the other fields - metaHdr.SetOrigin(f.Request.GetMetaHeader()) - - f.Request.SetMetaHeader(metaHdr) - - err = signature.SignServiceMessage(f.Key, f.Request) - }) - - if err != nil { - return nil, err - } - - var searchStream *rpc.SearchResponseReader - err = c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { - searchStream, err = rpc.SearchObjects(cli, f.Request, rpcclient.WithContext(ctx)) - return err - }) - if err != nil { - return nil, err - } - - // code below is copy-pasted from c.SearchObjects 
implementation, - // perhaps it is worth highlighting the utility function in frostfs-api-go - var ( - searchResult []oid.ID - resp = new(objectV2.SearchResponse) - ) - - for { - // receive message from server stream - err := searchStream.Read(resp) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - - return nil, fmt.Errorf("reading the response failed: %w", err) - } - - // verify response key - if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil { - return nil, err - } - - // verify response structure - if err := signature.VerifyServiceMessage(resp); err != nil { - return nil, fmt.Errorf("could not verify %T: %w", resp, err) - } - - chunk := resp.GetBody().GetIDList() - var id oid.ID - - for i := range chunk { - err = id.ReadFromV2(chunk[i]) - if err != nil { - return nil, fmt.Errorf("invalid object ID: %w", err) - } - - searchResult = append(searchResult, id) - } - } - - return searchResult, nil -} diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go deleted file mode 100644 index 856cd9f04..000000000 --- a/pkg/services/object/search/v2/service.go +++ /dev/null @@ -1,32 +0,0 @@ -package searchsvc - -import ( - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" - objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -// Service implements Search operation of Object service v2. -type Service struct { - svc *searchsvc.Service - keyStorage *objutil.KeyStorage -} - -// NewService constructs Service instance from provided options. -func NewService(s *searchsvc.Service, ks *objutil.KeyStorage) *Service { - return &Service{ - svc: s, - keyStorage: ks, - } -} - -// Search calls internal service and returns v2 object stream. 
-func (s *Service) Search(req *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - p, err := s.toPrm(req, stream) - if err != nil { - return err - } - - return s.svc.Search(stream.Context(), *p) -} diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go deleted file mode 100644 index 93b281343..000000000 --- a/pkg/services/object/search/v2/streamer.go +++ /dev/null @@ -1,29 +0,0 @@ -package searchsvc - -import ( - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type streamWriter struct { - stream objectSvc.SearchStream -} - -func (s *streamWriter) WriteIDs(ids []oid.ID) error { - r := new(object.SearchResponse) - - body := new(object.SearchResponseBody) - r.SetBody(body) - - idsV2 := make([]refs.ObjectID, len(ids)) - - for i := range ids { - ids[i].WriteToV2(&idsV2[i]) - } - - body.SetIDList(idsV2) - - return s.stream.Send(r) -} diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go deleted file mode 100644 index 48ae98958..000000000 --- a/pkg/services/object/search/v2/util.go +++ /dev/null @@ -1,95 +0,0 @@ -package searchsvc - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStream) (*searchsvc.Prm, error) { - body := req.GetBody() - - cnrV2 := body.GetContainerID() - if cnrV2 == nil { - return nil, errors.New("missing container ID") - } - - var id cid.ID - - err := id.ReadFromV2(*cnrV2) - if err != nil { - return nil, fmt.Errorf("invalid container ID: %w", err) - } - - commonPrm, err := util.CommonPrmFromV2(req) - if err != nil { - return nil, err - } - - p := new(searchsvc.Prm) - p.SetCommonParameters(commonPrm) - - p.SetWriter(&streamWriter{ - stream: stream, - }) - - if !commonPrm.LocalOnly() { - key, err := s.keyStorage.GetKey(nil) - if err != nil { - return nil, err - } - - forwarder := &requestForwarder{ - Request: req, - Key: key, - } - - p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequest)) - } - - p.WithContainerID(id) - p.WithSearchFilters(objectSDK.NewSearchFiltersFromV2(body.GetFilters())) - - return p, nil -} - -func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) ([]oid.ID, error)) searchsvc.RequestForwarder { - return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) ([]oid.ID, error) { - var ( - firstErr error - res []oid.ID - - key = info.PublicKey() - ) - - info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { - var err error - - defer func() { - stop = err == nil - - if stop || firstErr == nil { - firstErr = err - } - - // would be nice to log otherwise - 
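			// Iteration semantics: stop = (err == nil) halts the address loop on
			// the first successful attempt, and a success also clears firstErr;
			// otherwise only the first failure is retained and returned.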
}() - - res, err = f(ctx, addr, c, key) - - return - }) - - return res, firstErr - } -} diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go deleted file mode 100644 index e65293977..000000000 --- a/pkg/services/object/server.go +++ /dev/null @@ -1,52 +0,0 @@ -package object - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -// GetObjectStream is an interface of FrostFS API v2 compatible object streamer. -type GetObjectStream interface { - util.ServerStream - Send(*object.GetResponse) error -} - -// GetObjectRangeStream is an interface of FrostFS API v2 compatible payload range streamer. -type GetObjectRangeStream interface { - util.ServerStream - Send(*object.GetRangeResponse) error -} - -// SearchStream is an interface of FrostFS API v2 compatible search streamer. -type SearchStream interface { - util.ServerStream - Send(*object.SearchResponse) error -} - -// PutObjectStream is an interface of FrostFS API v2 compatible client's object streamer. -type PutObjectStream interface { - Send(context.Context, *object.PutRequest) error - CloseAndRecv(context.Context) (*object.PutResponse, error) -} - -// PatchObjectStream is an interface of FrostFS API v2 compatible patch streamer. -type PatchObjectStream interface { - Send(context.Context, *object.PatchRequest) error - CloseAndRecv(context.Context) (*object.PatchResponse, error) -} - -// ServiceServer is an interface of utility -// serving v2 Object service. -type ServiceServer interface { - Get(*object.GetRequest, GetObjectStream) error - Put(context.Context) (PutObjectStream, error) - Patch(context.Context) (PatchObjectStream, error) - Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error) - Search(*object.SearchRequest, SearchStream) error - Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error) - GetRange(*object.GetRangeRequest, GetObjectRangeStream) error - GetRangeHash(context.Context, *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) - PutSingle(context.Context, *object.PutSingleRequest) (*object.PutSingleResponse, error) -} diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go deleted file mode 100644 index fd8e926dd..000000000 --- a/pkg/services/object/sign.go +++ /dev/null @@ -1,254 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -type SignService struct { - sigSvc *util.SignService - - svc ServiceServer -} - -type searchStreamSigner struct { - SearchStream - sigSvc *util.SignService - - nonEmptyResp bool // set on first Send call -} - -type getStreamSigner struct { - GetObjectStream - sigSvc *util.SignService -} - -type putStreamSigner struct { - sigSvc *util.SignService - stream PutObjectStream - err error -} - -type patchStreamSigner struct { - sigSvc *util.SignService - stream PatchObjectStream - err error -} - -type getRangeStreamSigner struct { - GetObjectRangeStream - sigSvc *util.SignService -} - -func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService { - return &SignService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *getStreamSigner) Send(resp *object.GetResponse) error { - return s.send(resp, nil) -} - -func (s *getStreamSigner) send(resp *object.GetResponse, err error) error { - if err := s.sigSvc.SignResponse(resp, err); 
err != nil {
-		return err
-	}
-	return s.GetObjectStream.Send(resp)
-}
-
-func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error {
-	if err := s.sigSvc.VerifyRequest(req); err != nil {
-		resp := new(object.GetResponse)
-		_ = s.sigSvc.SignResponse(resp, err)
-		return stream.Send(resp)
-	}
-
-	w := &getStreamSigner{
-		GetObjectStream: stream,
-		sigSvc:          s.sigSvc,
-	}
-	if err := s.svc.Get(req, w); err != nil {
-		return w.send(new(object.GetResponse), err)
-	}
-	return nil
-}
-
-func (s *putStreamSigner) Send(ctx context.Context, req *object.PutRequest) error {
-	if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
-		return util.ErrAbortStream
-	}
-	if s.err = s.stream.Send(ctx, req); s.err != nil {
-		return util.ErrAbortStream
-	}
-	return nil
-}
-
-func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutResponse, err error) {
-	if s.err != nil {
-		err = s.err
-		resp = new(object.PutResponse)
-	} else {
-		resp, err = s.stream.CloseAndRecv(ctx)
-		if err != nil {
-			err = fmt.Errorf("could not close stream and receive response: %w", err)
-			resp = new(object.PutResponse)
-		}
-	}
-
-	return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
-	stream, err := s.svc.Put(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
-	}
-
-	return &putStreamSigner{
-		stream: stream,
-		sigSvc: s.sigSvc,
-	}, nil
-}
-
-func (s *patchStreamSigner) Send(ctx context.Context, req *object.PatchRequest) error {
-	if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
-		return util.ErrAbortStream
-	}
-	if s.err = s.stream.Send(ctx, req); s.err != nil {
-		return util.ErrAbortStream
-	}
-	return nil
-}
-
-func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PatchResponse, err error) {
-	if s.err != nil {
-		err = s.err
-		resp = new(object.PatchResponse)
-	} else {
-		resp, err = s.stream.CloseAndRecv(ctx)
-		if err != nil {
-			err = fmt.Errorf("could not close stream and receive response: %w", err)
-			resp = new(object.PatchResponse)
-		}
-	}
-
-	return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
-	stream, err := s.svc.Patch(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("could not create Patch object streamer: %w", err)
-	}
-
-	return &patchStreamSigner{
-		stream: stream,
-		sigSvc: s.sigSvc,
-	}, nil
-}
-
-func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
-	if err := s.sigSvc.VerifyRequest(req); err != nil {
-		resp := new(object.HeadResponse)
-		return resp, s.sigSvc.SignResponse(resp, err)
-	}
-	resp, err := util.EnsureNonNilResponse(s.svc.Head(ctx, req))
-	return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *SignService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
-	req.GetBody().SetMarshalData(req.GetBody().StableMarshal(nil))
-	if err := s.sigSvc.VerifyRequest(req); err != nil {
-		resp := new(object.PutSingleResponse)
-		return resp, s.sigSvc.SignResponse(resp, err)
-	}
-	resp, err := util.EnsureNonNilResponse(s.svc.PutSingle(ctx, req))
-	return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *searchStreamSigner) Send(resp *object.SearchResponse) error {
-	s.nonEmptyResp = true
-	return s.send(resp, nil)
-}
-
-func (s *searchStreamSigner) send(resp *object.SearchResponse, err error) error {
-	if err := s.sigSvc.SignResponse(resp, err); err != nil {
- return err - } - return s.SearchStream.Send(resp) -} - -func (s *SignService) Search(req *object.SearchRequest, stream SearchStream) error { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(object.SearchResponse) - _ = s.sigSvc.SignResponse(resp, err) - return stream.Send(resp) - } - - ss := &searchStreamSigner{ - SearchStream: stream, - sigSvc: s.sigSvc, - } - err := s.svc.Search(req, ss) - if err != nil || !ss.nonEmptyResp { - // The higher component does not write any response in the case of an empty result (which is correct). - // With the introduction of status returns at least one answer must be signed and sent to the client. - // This approach is supported by clients who do not know how to work with statuses (one could make - // a switch according to the protocol version from the request, but the costs of sending an empty - // answer can be neglected due to the gradual refusal to use the "old" clients). - return ss.send(new(object.SearchResponse), err) - } - return nil -} - -func (s *SignService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(object.DeleteResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Delete(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} - -func (s *getRangeStreamSigner) Send(resp *object.GetRangeResponse) error { - return s.send(resp, nil) -} - -func (s *getRangeStreamSigner) send(resp *object.GetRangeResponse, err error) error { - if err := s.sigSvc.SignResponse(resp, err); err != nil { - return err - } - return s.GetObjectRangeStream.Send(resp) -} - -func (s *SignService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(object.GetRangeResponse) - _ = s.sigSvc.SignResponse(resp, err) - return stream.Send(resp) - } - - w := &getRangeStreamSigner{ - GetObjectRangeStream: stream, - sigSvc: s.sigSvc, - } - if err := s.svc.GetRange(req, w); err != nil { - return w.send(new(object.GetRangeResponse), err) - } - return nil -} - -func (s *SignService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(object.GetRangeHashResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.GetRangeHash(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go deleted file mode 100644 index b446d3605..000000000 --- a/pkg/services/object/transport_splitter.go +++ /dev/null @@ -1,189 +0,0 @@ -package object - -import ( - "bytes" - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" -) - -type ( - TransportSplitter struct { - next ServiceServer - - chunkSize uint64 - addrAmount uint64 - } - - getStreamMsgSizeCtrl struct { - util.ServerStream - - stream GetObjectStream - - chunkSize int - } - - searchStreamMsgSizeCtrl struct { - util.ServerStream - - stream SearchStream - - addrAmount uint64 - } - - rangeStreamMsgSizeCtrl struct { - util.ServerStream - - stream GetObjectRangeStream - - chunkSize int - } -) - -func (s *getStreamMsgSizeCtrl) Send(resp *object.GetResponse) error { - body := resp.GetBody() - - part := body.GetObjectPart() - - 
chunkPart, ok := part.(*object.GetObjectPartChunk) - if !ok { - return s.stream.Send(resp) - } - - var newResp *object.GetResponse - - for buf := bytes.NewBuffer(chunkPart.GetChunk()); buf.Len() > 0; { - if newResp == nil { - newResp = new(object.GetResponse) - newResp.SetBody(body) - } - - chunkPart.SetChunk(buf.Next(s.chunkSize)) - newResp.SetMetaHeader(resp.GetMetaHeader()) - newResp.SetVerificationHeader(resp.GetVerificationHeader()) - - if err := s.stream.Send(newResp); err != nil { - return err - } - } - - return nil -} - -func NewTransportSplitter(size, amount uint64, next ServiceServer) *TransportSplitter { - return &TransportSplitter{ - next: next, - chunkSize: size, - addrAmount: amount, - } -} - -func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream) error { - return c.next.Get(req, &getStreamMsgSizeCtrl{ - ServerStream: stream, - stream: stream, - chunkSize: int(c.chunkSize), - }) -} - -func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) { - return c.next.Put(ctx) -} - -func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) { - return c.next.Patch(ctx) -} - -func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) { - return c.next.Head(ctx, request) -} - -func (c TransportSplitter) Search(req *object.SearchRequest, stream SearchStream) error { - return c.next.Search(req, &searchStreamMsgSizeCtrl{ - ServerStream: stream, - stream: stream, - addrAmount: c.addrAmount, - }) -} - -func (c TransportSplitter) Delete(ctx context.Context, request *object.DeleteRequest) (*object.DeleteResponse, error) { - return c.next.Delete(ctx, request) -} - -func (c TransportSplitter) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { - return c.next.PutSingle(ctx, req) -} - -func (s *rangeStreamMsgSizeCtrl) Send(resp *object.GetRangeResponse) error { - body := resp.GetBody() - - chunkPart, ok := body.GetRangePart().(*object.GetRangePartChunk) - if !ok { - return s.stream.Send(resp) - } - - var newResp *object.GetRangeResponse - - for buf := bytes.NewBuffer(chunkPart.GetChunk()); buf.Len() > 0; { - if newResp == nil { - newResp = new(object.GetRangeResponse) - newResp.SetBody(body) - } - - chunkPart.SetChunk(buf.Next(s.chunkSize)) - body.SetRangePart(chunkPart) - newResp.SetMetaHeader(resp.GetMetaHeader()) - newResp.SetVerificationHeader(resp.GetVerificationHeader()) - - if err := s.stream.Send(newResp); err != nil { - return err - } - } - - return nil -} - -func (c TransportSplitter) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error { - return c.next.GetRange(req, &rangeStreamMsgSizeCtrl{ - ServerStream: stream, - stream: stream, - chunkSize: int(c.chunkSize), - }) -} - -func (c TransportSplitter) GetRangeHash(ctx context.Context, request *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - return c.next.GetRangeHash(ctx, request) -} - -func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error { - body := resp.GetBody() - ids := body.GetIDList() - - var newResp *object.SearchResponse - - for { - if newResp == nil { - newResp = new(object.SearchResponse) - newResp.SetBody(body) - } - - cut := min(s.addrAmount, uint64(len(ids))) - - body.SetIDList(ids[:cut]) - newResp.SetMetaHeader(resp.GetMetaHeader()) - newResp.SetVerificationHeader(resp.GetVerificationHeader()) - - if err := s.stream.Send(newResp); err != nil { - return err - } - - ids = ids[cut:] - - if len(ids) == 
0 { - break - } - } - - return nil -} diff --git a/pkg/services/object/util/chain.go b/pkg/services/object/util/chain.go deleted file mode 100644 index b574d5eb6..000000000 --- a/pkg/services/object/util/chain.go +++ /dev/null @@ -1,184 +0,0 @@ -package util - -import ( - "errors" - "fmt" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// HeadReceiver is an interface of entity that can receive -// object header or the information about the object relations. -type HeadReceiver interface { - // Head must return one of: - // * object header (*object.Object); - // * structured information about split-chain (*object.SplitInfo). - Head(id oid.Address) (any, error) -} - -// SplitMemberHandler is a handler of next split-chain element. -// -// If reverseDirection arg is true, then the traversal is done in reverse order. -// Stop boolean result provides the ability to interrupt the traversal. -type SplitMemberHandler func(member *objectSDK.Object, reverseDirection bool) (stop bool) - -// IterateAllSplitLeaves is an iterator over all object split-tree leaves in direct order. -func IterateAllSplitLeaves(r HeadReceiver, addr oid.Address, h func(*objectSDK.Object)) error { - return IterateSplitLeaves(r, addr, func(leaf *objectSDK.Object) bool { - h(leaf) - return false - }) -} - -// IterateSplitLeaves is an iterator over object split-tree leaves in direct order. -// -// If member handler returns true, then the iterator aborts without error. -func IterateSplitLeaves(r HeadReceiver, addr oid.Address, h func(*objectSDK.Object) bool) error { - var ( - reverse bool - leaves []*objectSDK.Object - ) - - if err := TraverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) { - reverse = reverseDirection - - if reverse { - leaves = append(leaves, member) - return false - } - - return h(member) - }); err != nil { - return err - } - - for i := len(leaves) - 1; i >= 0; i-- { - if h(leaves[i]) { - break - } - } - - return nil -} - -// TraverseSplitChain is an iterator over object split-tree leaves. -// -// Traversal occurs in one of two directions, which depends on what pslit info was received: -// * in direct order for link part; -// * in reverse order for last part. 
-func TraverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler) error { - _, err := traverseSplitChain(r, addr, h) - return err -} - -func traverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler) (bool, error) { - v, err := r.Head(addr) - if err != nil { - return false, err - } - - cnr := addr.Container() - - switch res := v.(type) { - default: - panic(fmt.Sprintf("unexpected result of %T: %T", r, v)) - case *objectSDK.Object: - return h(res, false), nil - case *objectSDK.SplitInfo: - link, withLink := res.Link() - last, withLast := res.LastPart() - - switch { - default: - return false, errors.New("lack of split information") - case withLink: - return traverseByLink(cnr, link, r, h) - case withLast: - return traverseByLast(cnr, last, withLast, res, r, h) - } - } -} - -func traverseByLink(cnr cid.ID, link oid.ID, r HeadReceiver, h SplitMemberHandler) (bool, error) { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(link) - - chain := make([]oid.ID, 0) - - if _, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) { - children := member.Children() - - if reverseDirection { - chain = append(children, chain...) - } else { - chain = append(chain, children...) - } - - return false - }); err != nil { - return false, err - } - - var reverseChain []*objectSDK.Object - - for i := range chain { - addr.SetObject(chain[i]) - - if stop, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) { - if !reverseDirection { - return h(member, false) - } - - reverseChain = append(reverseChain, member) - return false - }); err != nil || stop { - return stop, err - } - } - - for i := len(reverseChain) - 1; i >= 0; i-- { - if h(reverseChain[i], false) { - return true, nil - } - } - return false, nil -} - -func traverseByLast(cnr cid.ID, last oid.ID, withLast bool, res *objectSDK.SplitInfo, r HeadReceiver, h SplitMemberHandler) (bool, error) { - var addr oid.Address - addr.SetContainer(cnr) - - for last, withLast = res.LastPart(); withLast; { - addr.SetObject(last) - - var directChain []*objectSDK.Object - - if _, err := traverseSplitChain(r, addr, func(member *objectSDK.Object, reverseDirection bool) (stop bool) { - if reverseDirection { - last, withLast = member.PreviousID() - return h(member, true) - } - - directChain = append(directChain, member) - - return false - }); err != nil { - return false, err - } - - for i := len(directChain) - 1; i >= 0; i-- { - if h(directChain[i], true) { - return true, nil - } - } - - if len(directChain) > 0 { - last, withLast = directChain[len(directChain)-1].PreviousID() - } - } - - return false, nil -} diff --git a/pkg/services/object/util/key.go b/pkg/services/object/util/key.go deleted file mode 100644 index 23d6c1c68..000000000 --- a/pkg/services/object/util/key.go +++ /dev/null @@ -1,80 +0,0 @@ -package util - -import ( - "crypto/ecdsa" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/google/uuid" -) - -// SessionSource is an interface tha provides -// access to node's actual (not expired) session -// tokens. -type SessionSource interface { - // Get must return non-expired private token that - // corresponds with passed owner and tokenID. 
If - // token has not been created, has been expired - // of it is impossible to get information about the - // token Get must return nil. - Get(owner user.ID, tokenID []byte) *storage.PrivateToken -} - -// KeyStorage represents private key storage of the local node. -type KeyStorage struct { - key *ecdsa.PrivateKey - - tokenStore SessionSource - - networkState netmap.State -} - -// NewKeyStorage creates, initializes and returns new KeyStorage instance. -func NewKeyStorage(localKey *ecdsa.PrivateKey, tokenStore SessionSource, net netmap.State) *KeyStorage { - return &KeyStorage{ - key: localKey, - tokenStore: tokenStore, - networkState: net, - } -} - -// SessionInfo groups information about FrostFS Object session -// which is reflected in KeyStorage. -type SessionInfo struct { - // Session unique identifier. - ID uuid.UUID - - // Session issuer. - Owner user.ID -} - -// GetKey fetches private key depending on the SessionInfo. -// -// If info is not `nil`, searches for dynamic session token through the -// underlying token storage. Returns apistatus.SessionTokenNotFound if -// token storage does not contain information about provided dynamic session. -// -// If info is `nil`, returns node's private key. -func (s *KeyStorage) GetKey(info *SessionInfo) (*ecdsa.PrivateKey, error) { - if info != nil { - binID, err := info.ID.MarshalBinary() - if err != nil { - return nil, fmt.Errorf("marshal ID: %w", err) - } - - pToken := s.tokenStore.Get(info.Owner, binID) - if pToken != nil { - if pToken.ExpiredAt() < s.networkState.CurrentEpoch() { - return nil, new(apistatus.SessionTokenExpired) - } - return pToken.SessionKey(), nil - } - - return nil, new(apistatus.SessionTokenNotFound) - } - - return s.key, nil -} diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go deleted file mode 100644 index 1753a26f7..000000000 --- a/pkg/services/object/util/key_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package util_test - -import ( - "context" - "crypto/elliptic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestNewKeyStorage(t *testing.T) { - nodeKey, err := keys.NewPrivateKey() - require.NoError(t, err) - - tokenStor := tokenStorage.NewTokenStore() - stor := util.NewKeyStorage(&nodeKey.PrivateKey, tokenStor, mockedNetworkState{42}) - - owner := usertest.ID() - - t.Run("node key", func(t *testing.T) { - key, err := stor.GetKey(nil) - require.NoError(t, err) - require.Equal(t, nodeKey.PrivateKey, *key) - }) - - t.Run("unknown token", func(t *testing.T) { - _, err = stor.GetKey(&util.SessionInfo{ - ID: uuid.New(), - Owner: usertest.ID(), - }) - require.Error(t, err) - }) - - t.Run("known token", func(t *testing.T) { - tok := createToken(t, tokenStor, owner, 100) - - key, err := stor.GetKey(&util.SessionInfo{ - ID: tok.ID(), - Owner: owner, - }) - require.NoError(t, err) - require.True(t, 
tok.AssertAuthKey((*frostfsecdsa.PublicKey)(&key.PublicKey))) - }) - - t.Run("expired token", func(t *testing.T) { - tok := createToken(t, tokenStor, owner, 30) - _, err := stor.GetKey(&util.SessionInfo{ - ID: tok.ID(), - Owner: owner, - }) - require.Error(t, err) - }) -} - -func createToken(t *testing.T, store *tokenStorage.TokenStore, owner user.ID, exp uint64) session.Object { - var ownerV2 refs.OwnerID - owner.WriteToV2(&ownerV2) - - req := new(sessionV2.CreateRequestBody) - req.SetOwnerID(&ownerV2) - req.SetExpiration(exp) - - resp, err := store.Create(context.Background(), req) - require.NoError(t, err) - - pub, err := keys.NewPublicKeyFromBytes(resp.GetSessionKey(), elliptic.P256()) - require.NoError(t, err) - - var id uuid.UUID - require.NoError(t, id.UnmarshalBinary(resp.GetID())) - - var tok session.Object - tok.SetAuthKey((*frostfsecdsa.PublicKey)(pub)) - tok.SetID(id) - - return tok -} - -type mockedNetworkState struct { - value uint64 -} - -func (m mockedNetworkState) CurrentEpoch() uint64 { - return m.value -} diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go deleted file mode 100644 index b10826226..000000000 --- a/pkg/services/object/util/log.go +++ /dev/null @@ -1,19 +0,0 @@ -package util - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// LogServiceError writes error message of object service to provided logger. -func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error(ctx, logs.UtilObjectServiceError, - zap.String("node", network.StringifyGroup(node)), - zap.String("request", req), - zap.Error(err), - ) -} diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go deleted file mode 100644 index f74b0aab9..000000000 --- a/pkg/services/object/util/placement.go +++ /dev/null @@ -1,170 +0,0 @@ -package util - -import ( - "context" - "fmt" - "slices" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type localPlacement struct { - builder placement.Builder - - netmapKeys netmap.AnnouncedKeys -} - -type remotePlacement struct { - builder placement.Builder - - netmapKeys netmap.AnnouncedKeys -} - -// TraverserGenerator represents tool that generates -// container traverser for the particular need. 
-type TraverserGenerator struct { - netMapSrc netmap.Source - - cnrSrc container.Source - - netmapKeys netmap.AnnouncedKeys - - customOpts []placement.Option -} - -func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Builder { - return &localPlacement{ - builder: b, - netmapKeys: s, - } -} - -func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) - if err != nil { - return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) - } - - for i := range vs { - for j := range vs[i] { - var addr network.AddressGroup - - err := addr.FromIterator(network.NodeEndpointsIterator(vs[i][j])) - if err != nil { - continue - } - - if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - return [][]netmapSDK.NodeInfo{{vs[i][j]}}, nil - } - } - } - - return nil, fmt.Errorf("(%T) local node is outside of object placement", p) -} - -// NewRemotePlacementBuilder creates, initializes and returns placement builder that -// excludes local node from any placement vector. -func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) placement.Builder { - return &remotePlacement{ - builder: b, - netmapKeys: s, - } -} - -func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) - if err != nil { - return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) - } - - for i := range vs { - for j := 0; j < len(vs[i]); j++ { - var addr network.AddressGroup - - err := addr.FromIterator(network.NodeEndpointsIterator(vs[i][j])) - if err != nil { - continue - } - - if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = slices.Delete(vs[i], j, j+1) - j-- - } - } - } - - return vs, nil -} - -// NewTraverserGenerator creates, initializes and returns new TraverserGenerator instance. -func NewTraverserGenerator(nmSrc netmap.Source, cnrSrc container.Source, netmapKeys netmap.AnnouncedKeys) *TraverserGenerator { - return &TraverserGenerator{ - netMapSrc: nmSrc, - cnrSrc: cnrSrc, - netmapKeys: netmapKeys, - } -} - -// WithTraverseOptions returns TraverseGenerator that additionally applies provided options. -func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *TraverserGenerator { - return &TraverserGenerator{ - netMapSrc: g.netMapSrc, - cnrSrc: g.cnrSrc, - netmapKeys: g.netmapKeys, - customOpts: opts, - } -} - -// GenerateTraverser generates placement Traverser for provided object address -// using epoch-th network map. -func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { - // get network map by epoch - nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err) - } - - // get container related container - cnr, err := g.cnrSrc.Get(ctx, idCnr) - if err != nil { - return nil, nil, fmt.Errorf("could not get container: %w", err) - } - - // allocate placement traverser options - traverseOpts := make([]placement.Option, 0, 3+len(g.customOpts)) - traverseOpts = append(traverseOpts, g.customOpts...) 
- - // create builder of the remote nodes from network map - builder := NewRemotePlacementBuilder( - placement.NewNetworkMapBuilder(nm), - g.netmapKeys, - ) - - traverseOpts = append(traverseOpts, - // set processing container - placement.ForContainer(cnr.Value), - - // set placement builder - placement.UseBuilder(builder), - ) - - if idObj != nil { - traverseOpts = append(traverseOpts, - // set identifier of the processing object - placement.ForObject(*idObj), - ) - } - - t, err := placement.NewTraverser(ctx, traverseOpts...) - if err != nil { - return nil, nil, err - } - return t, cnr, nil -} diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go deleted file mode 100644 index 34d8ec704..000000000 --- a/pkg/services/object/util/prm.go +++ /dev/null @@ -1,180 +0,0 @@ -package util - -import ( - "fmt" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -// maxLocalTTL is maximum TTL for an operation to be considered local. -const maxLocalTTL = 1 - -type CommonPrm struct { - local bool - - netmapEpoch, netmapLookupDepth uint64 - - token *sessionsdk.Object - - bearer *bearer.Token - - ttl uint32 - - xhdrs []string -} - -// TTL returns TTL for new requests. -func (p *CommonPrm) TTL() uint32 { - if p != nil { - return p.ttl - } - - return 1 -} - -// XHeaders returns X-Headers for new requests. -func (p *CommonPrm) XHeaders() []string { - if p != nil { - return p.xhdrs - } - - return nil -} - -func (p *CommonPrm) WithLocalOnly(v bool) *CommonPrm { - if p != nil { - p.local = v - } - - return p -} - -func (p *CommonPrm) LocalOnly() bool { - if p != nil { - return p.local - } - - return false -} - -func (p *CommonPrm) SessionToken() *sessionsdk.Object { - if p != nil { - return p.token - } - - return nil -} - -func (p *CommonPrm) BearerToken() *bearer.Token { - if p != nil { - return p.bearer - } - - return nil -} - -func (p *CommonPrm) NetmapEpoch() uint64 { - if p != nil { - return p.netmapEpoch - } - - return 0 -} - -func (p *CommonPrm) NetmapLookupDepth() uint64 { - if p != nil { - return p.netmapLookupDepth - } - - return 0 -} - -func (p *CommonPrm) SetNetmapLookupDepth(v uint64) { - if p != nil { - p.netmapLookupDepth = v - } -} - -// ForgetTokens forgets all the tokens read from the request's -// meta information before. 
-func (p *CommonPrm) ForgetTokens() func() { - if p != nil { - tk := p.token - br := p.bearer - p.token = nil - p.bearer = nil - return func() { - p.token = tk - p.bearer = br - } - } - return func() {} -} - -func CommonPrmFromV2(req interface { - GetMetaHeader() *session.RequestMetaHeader -}, -) (*CommonPrm, error) { - meta := req.GetMetaHeader() - ttl := meta.GetTTL() - - // unwrap meta header to get original request meta information - for meta.GetOrigin() != nil { - meta = meta.GetOrigin() - } - - var tokenSession *sessionsdk.Object - var err error - - if tokenSessionV2 := meta.GetSessionToken(); tokenSessionV2 != nil { - tokenSession = new(sessionsdk.Object) - - err = tokenSession.ReadFromV2(*tokenSessionV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - } - - xHdrs := meta.GetXHeaders() - - prm := &CommonPrm{ - local: ttl <= maxLocalTTL, - token: tokenSession, - ttl: ttl - 1, // decrease TTL for new requests - xhdrs: make([]string, 0, 2*len(xHdrs)), - } - - if tok := meta.GetBearerToken(); tok != nil { - prm.bearer = new(bearer.Token) - err = prm.bearer.ReadFromV2(*tok) - if err != nil { - return nil, fmt.Errorf("invalid bearer token: %w", err) - } - } - - for i := range xHdrs { - switch key := xHdrs[i].GetKey(); key { - case session.XHeaderNetmapEpoch: - var err error - - prm.netmapEpoch, err = strconv.ParseUint(xHdrs[i].GetValue(), 10, 64) - if err != nil { - return nil, err - } - case session.XHeaderNetmapLookupDepth: - var err error - - prm.netmapLookupDepth, err = strconv.ParseUint(xHdrs[i].GetValue(), 10, 64) - if err != nil { - return nil, err - } - default: - prm.xhdrs = append(prm.xhdrs, key, xHdrs[i].GetValue()) - } - } - - return prm, nil -} diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go deleted file mode 100644 index 2a8460ca5..000000000 --- a/pkg/services/object_manager/placement/cache.go +++ /dev/null @@ -1,78 +0,0 @@ -package placement - -import ( - "crypto/sha256" - "fmt" - "slices" - "sync" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/hashicorp/golang-lru/v2/simplelru" -) - -// ContainerNodesCache caches results of ContainerNodes() invocation between epochs. -type ContainerNodesCache struct { - // mtx protects lastEpoch and containerCache fields. - mtx sync.Mutex - // lastEpoch contains network map epoch for all values in the container cache. - lastEpoch uint64 - // containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if - // neither netmap nor container has changed. - containerCache simplelru.LRUCache[cid.ID, [][]netmapSDK.NodeInfo] -} - -// defaultContainerCacheSize is the default size for the container cache. -const defaultContainerCacheSize = 10 - -// NewContainerNodesCache creates new cache which saves the result of the ContainerNodes() invocations. -// If size is <= 0, defaultContainerCacheSize (10) is used. -func NewContainerNodesCache(size int) *ContainerNodesCache { - if size <= 0 { - size = defaultContainerCacheSize - } - - cache, _ := simplelru.NewLRU[cid.ID, [][]netmapSDK.NodeInfo](size, nil) // no error - return &ContainerNodesCache{ - containerCache: cache, - } -} - -// ContainerNodes returns the result of nm.ContainerNodes(), possibly from the cache. 
-func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - c.mtx.Lock() - if nm.Epoch() == c.lastEpoch { - raw, ok := c.containerCache.Get(cnr) - c.mtx.Unlock() - if ok { - return c.cloneResult(raw), nil - } - } else { - c.lastEpoch = nm.Epoch() - c.containerCache.Purge() - c.mtx.Unlock() - } - - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - cn, err := nm.ContainerNodes(p, binCnr) - if err != nil { - return nil, fmt.Errorf("could not get container nodes: %w", err) - } - - c.mtx.Lock() - if c.lastEpoch == nm.Epoch() { - c.containerCache.Add(cnr, cn) - } - c.mtx.Unlock() - return c.cloneResult(cn), nil -} - -func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo { - result := make([][]netmapSDK.NodeInfo, len(nodes)) - for repIdx := range nodes { - result[repIdx] = slices.Clone(nodes[repIdx]) - } - return result -} diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go deleted file mode 100644 index 7242970b5..000000000 --- a/pkg/services/object_manager/placement/cache_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package placement_test - -import ( - "strconv" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/stretchr/testify/require" -) - -func TestContainerNodesCache(t *testing.T) { - const size = 3 - - nodes := [6]netmapSDK.NodeInfo{} - for i := range nodes { - nodes[i].SetAttribute("ATTR", strconv.Itoa(i)) - } - - nm := func(epoch uint64, nodes []netmapSDK.NodeInfo) *netmapSDK.NetMap { - var nm netmapSDK.NetMap - nm.SetEpoch(epoch) - nm.SetNodes(nodes) - return &nm - } - - var pp netmapSDK.PlacementPolicy - require.NoError(t, pp.DecodeString("REP 1")) - - t.Run("update netmap on the new epoch", func(t *testing.T) { - c := placement.NewContainerNodesCache(size) - - cnr := cidtest.ID() - res, err := c.ContainerNodes(nm(1, nodes[0:1]), cnr, pp) - require.NoError(t, err) - - // Use other nodes in the argument to ensure the result is taken from cache. - resCached, err := c.ContainerNodes(nm(1, nodes[1:2]), cnr, pp) - require.NoError(t, err) - require.Equal(t, res, resCached) - - // Update epoch, netmap should be purged. 
- resCached, err = c.ContainerNodes(nm(2, nodes[2:3]), cnr, pp) - require.NoError(t, err) - require.NotEqual(t, res, resCached) - }) - t.Run("cache uses container as a key", func(t *testing.T) { - c := placement.NewContainerNodesCache(size) - - res1, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp) - require.NoError(t, err) - - res2, err := c.ContainerNodes(nm(1, nodes[1:2]), cidtest.ID(), pp) - require.NoError(t, err) - - require.NotEqual(t, res1, res2) - }) - t.Run("cache respects size parameter", func(t *testing.T) { - c := placement.NewContainerNodesCache(size) - - nm1 := nm(1, nodes[0:1]) - nm2 := nm(1, nodes[1:2]) - cnr := [size * 2]cid.ID{} - res := [size * 2][][]netmapSDK.NodeInfo{} - for i := range size * 2 { - cnr[i] = cidtest.ID() - - var err error - res[i], err = c.ContainerNodes(nm1, cnr[i], pp) - require.NoError(t, err) - } - - for i := size; i < size*2; i++ { - r, err := c.ContainerNodes(nm2, cnr[i], pp) - require.NoError(t, err) - require.Equal(t, res[i], r) - } - for i := range size { - r, err := c.ContainerNodes(nm2, cnr[i], pp) - require.NoError(t, err) - require.NotEqual(t, res[i], r) - } - }) - t.Run("the error is propagated", func(t *testing.T) { - var pp netmapSDK.PlacementPolicy - r := netmapSDK.ReplicaDescriptor{} - r.SetNumberOfObjects(1) - r.SetSelectorName("Missing") - pp.AddReplicas(r) - - c := placement.NewContainerNodesCache(size) - _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp) - require.Error(t, err) - }) -} diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go deleted file mode 100644 index 0f24a9d96..000000000 --- a/pkg/services/object_manager/placement/metrics.go +++ /dev/null @@ -1,185 +0,0 @@ -package placement - -import ( - "errors" - "fmt" - "maps" - "math" - "strings" - "sync" - "sync/atomic" - - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -const ( - attrPrefix = "$attribute:" - - geoDistance = "$geoDistance" -) - -type Metric interface { - CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int -} - -type metricsParser struct { - locodeDBPath string - locodes map[string]locodedb.Point -} - -type MetricParser interface { - ParseMetrics([]string) ([]Metric, error) -} - -func NewMetricsParser(locodeDBPath string) (MetricParser, error) { - return &metricsParser{ - locodeDBPath: locodeDBPath, - }, nil -} - -func (p *metricsParser) initLocodes() error { - if len(p.locodes) != 0 { - return nil - } - if len(p.locodeDBPath) > 0 { - p.locodes = make(map[string]locodedb.Point) - locodeDB := locodebolt.New(locodebolt.Prm{ - Path: p.locodeDBPath, - }, - locodebolt.ReadOnly(), - ) - err := locodeDB.Open() - if err != nil { - return err - } - defer locodeDB.Close() - err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { - p.locodes[k] = v - }) - if err != nil { - return err - } - return nil - } - return errors.New("set path to locode database") -} - -func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { - var metrics []Metric - for _, raw := range priority { - if attr, found := strings.CutPrefix(raw, attrPrefix); found { - metrics = append(metrics, NewAttributeMetric(attr)) - } else if raw == geoDistance { - err := p.initLocodes() - if err != nil { - return nil, err - } - if len(p.locodes) == 0 { - return nil, fmt.Errorf("provide locodes database for metric %s", raw) - } 
- m := NewGeoDistanceMetric(p.locodes) - metrics = append(metrics, m) - } else { - return nil, fmt.Errorf("unsupported priority metric %s", raw) - } - } - return metrics, nil -} - -// attributeMetric describes priority metric based on attribute. -type attributeMetric struct { - attribute string -} - -// CalculateValue return [0] if from and to contains attribute attributeMetric.attribute and -// the value of attribute is the same. In other case return [1]. -func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { - fromAttr := from.Attribute(am.attribute) - toAttr := to.Attribute(am.attribute) - if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr { - return 0 - } - return 1 -} - -func NewAttributeMetric(attr string) Metric { - return &attributeMetric{attribute: attr} -} - -// geoDistanceMetric describes priority metric based on attribute. -type geoDistanceMetric struct { - locodes map[string]locodedb.Point - distance *atomic.Pointer[map[string]int] - mtx sync.Mutex -} - -func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric { - d := atomic.Pointer[map[string]int]{} - m := make(map[string]int) - d.Store(&m) - gm := &geoDistanceMetric{ - locodes: locodes, - distance: &d, - } - return gm -} - -// CalculateValue return distance in kilometers between current node and provided, -// if coordinates for provided node found. In other case return math.MaxInt. -func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { - fl := from.LOCODE() - tl := to.LOCODE() - if fl == tl { - return 0 - } - m := gm.distance.Load() - if v, ok := (*m)[fl+tl]; ok { - return v - } - return gm.calculateDistance(fl, tl) -} - -func (gm *geoDistanceMetric) calculateDistance(from, to string) int { - gm.mtx.Lock() - defer gm.mtx.Unlock() - od := gm.distance.Load() - if v, ok := (*od)[from+to]; ok { - return v - } - nd := maps.Clone(*od) - var dist int - pointFrom, okFrom := gm.locodes[from] - pointTo, okTo := gm.locodes[to] - if okFrom && okTo { - dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude())) - } else { - dist = math.MaxInt - } - nd[from+to] = dist - gm.distance.Store(&nd) - - return dist -} - -// distance return amount of KM between two points. -// Parameters are latitude and longitude of point 1 and 2 in decimal degrees. -// Original implementation can be found here https://www.geodatasource.com/developers/go. 
-func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 { - radLat1 := math.Pi * lt1 / 180 - radLat2 := math.Pi * lt2 / 180 - radTheta := math.Pi * (ln1 - ln2) / 180 - - dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta) - - if dist > 1 { - dist = 1 - } - - dist = math.Acos(dist) - dist = dist * 180 / math.Pi - dist = dist * 60 * 1.1515 * 1.609344 - - return dist -} diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go deleted file mode 100644 index b3f8d9c03..000000000 --- a/pkg/services/object_manager/placement/netmap.go +++ /dev/null @@ -1,88 +0,0 @@ -package placement - -import ( - "context" - "crypto/sha256" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type netMapBuilder struct { - nmSrc netmap.Source - containerCache *ContainerNodesCache -} - -type netMapSrc struct { - netmap.Source - - nm *netmapSDK.NetMap -} - -func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder { - return &netMapBuilder{ - nmSrc: &netMapSrc{nm: nm}, - containerCache: NewContainerNodesCache(0), - } -} - -func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder { - return &netMapBuilder{ - nmSrc: nmSrc, - containerCache: NewContainerNodesCache(0), - } -} - -func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) { - return s.nm, nil -} - -func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc) - if err != nil { - return nil, fmt.Errorf("could not get network map: %w", err) - } - - cn, err := b.containerCache.ContainerNodes(nm, cnr, p) - if err != nil { - return nil, err - } - - return BuildObjectPlacement(nm, cn, obj) -} - -func BuildObjectPlacement(nm *netmapSDK.NetMap, cnrNodes [][]netmapSDK.NodeInfo, id *oid.ID) ([][]netmapSDK.NodeInfo, error) { - if id == nil { - return cnrNodes, nil - } - - binObj := make([]byte, sha256.Size) - id.Encode(binObj) - - on, err := nm.PlacementVectors(cnrNodes, binObj) - if err != nil { - return nil, fmt.Errorf("could not get placement vectors for object: %w", err) - } - - return on, nil -} - -// FlattenNodes appends each row to the flat list. -func FlattenNodes(ns [][]netmapSDK.NodeInfo) []netmapSDK.NodeInfo { - var sz int - - for i := range ns { - sz += len(ns[i]) - } - - result := make([]netmapSDK.NodeInfo, 0, sz) - - for i := range ns { - result = append(result, ns[i]...) - } - - return result -} diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go deleted file mode 100644 index a3f9af959..000000000 --- a/pkg/services/object_manager/placement/traverser.go +++ /dev/null @@ -1,391 +0,0 @@ -package placement - -import ( - "context" - "errors" - "fmt" - "slices" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Builder is an interface of the -// object placement vector builder. 
-type Builder interface { - // BuildPlacement returns the list of placement vectors - // for object according to the placement policy. - // - // Must return all container nodes if object identifier - // is nil. - BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) -} - -type NodeState interface { - // LocalNodeInfo return current node state in FrostFS API v2 NodeInfo structure. - LocalNodeInfo() *netmap.NodeInfo -} - -// Option represents placement traverser option. -type Option func(*cfg) - -// Traverser represents utility for controlling -// traversal of object placement vectors. -type Traverser struct { - mtx sync.RWMutex - - vectors [][]netmap.NodeInfo - - rem []int -} - -type cfg struct { - trackCopies bool - copyNumbers []uint32 - - flatSuccess *uint32 - - cnr cid.ID - - obj *oid.ID - - policySet bool - policy netmap.PlacementPolicy - - builder Builder - - metrics []Metric - - nodeState NodeState -} - -const invalidOptsMsg = "invalid traverser options" - -var errNilBuilder = errors.New("placement builder is nil") - -var errNilPolicy = errors.New("placement policy is nil") - -var errCopiesNumberLen = errors.New("copies number accepts only one number or array with length " + - "equal to length of replicas") - -func defaultCfg() *cfg { - return &cfg{ - trackCopies: true, - } -} - -// NewTraverser creates, initializes with options and returns Traverser instance. -func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { - cfg := defaultCfg() - - for i := range opts { - if opts[i] != nil { - opts[i](cfg) - } - } - - cnLen := len(cfg.copyNumbers) - if cnLen > 0 && cnLen != 1 && cnLen != cfg.policy.NumberOfReplicas() { - return nil, errCopiesNumberLen - } - - if cfg.builder == nil { - return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilBuilder) - } else if !cfg.policySet { - return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy) - } - - ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy) - if err != nil { - return nil, fmt.Errorf("could not build placement: %w", err) - } - - // backward compatibility for scalar `copies_number` - if len(cfg.copyNumbers) == 1 && cfg.copyNumbers[0] != 0 { - cfg.flatSuccess = &cfg.copyNumbers[0] - } - - var rem []int - if len(cfg.metrics) > 0 && cfg.nodeState != nil { - rem = defaultCopiesVector(cfg.policy) - var unsortedVector []netmap.NodeInfo - var regularVector []netmap.NodeInfo - for i := range rem { - pivot := min(len(ns[i]), rem[i]) - unsortedVector = append(unsortedVector, ns[i][:pivot]...) - regularVector = append(regularVector, ns[i][pivot:]...) - } - rem = []int{-1, -1} - - sortedVector := sortVector(cfg, unsortedVector) - ns = [][]netmap.NodeInfo{sortedVector, regularVector} - } else if cfg.flatSuccess != nil { - ns = flatNodes(ns) - rem = []int{int(*cfg.flatSuccess)} - } else { - rem = defaultCopiesVector(cfg.policy) - - // Bool flag which is set when cfg.copyNumbers contains not only zeros. - // In this case we should not modify `rem` slice unless track - // copies are ignored, because [0, ...] means that all copies should be - // stored before returning OK to the client. 
- var considerCopiesNumber bool - for _, val := range cfg.copyNumbers { - if val != 0 { - considerCopiesNumber = true - break - } - } - - for i := range rem { - if !cfg.trackCopies { - rem[i] = -1 - } else if considerCopiesNumber && len(cfg.copyNumbers) > i { - rem[i] = int(cfg.copyNumbers[i]) - } - } - } - - return &Traverser{ - rem: rem, - vectors: ns, - }, nil -} - -func defaultCopiesVector(policy netmap.PlacementPolicy) []int { - replNum := policy.NumberOfReplicas() - copyVector := make([]int, 0, replNum) - - for i := range replNum { - copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount())) - } - - return copyVector -} - -func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo { - sz := 0 - for i := range ns { - sz += len(ns[i]) - } - - flat := make([]netmap.NodeInfo, 0, sz) - for i := range ns { - flat = append(flat, ns[i]...) - } - - return [][]netmap.NodeInfo{flat} -} - -type nodeMetrics struct { - index int - metrics []int -} - -func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { - nm := make([]nodeMetrics, len(unsortedVector)) - node := cfg.nodeState.LocalNodeInfo() - - for i := range unsortedVector { - m := make([]int, len(cfg.metrics)) - for j, pm := range cfg.metrics { - m[j] = pm.CalculateValue(node, &unsortedVector[i]) - } - nm[i] = nodeMetrics{ - index: i, - metrics: m, - } - } - slices.SortStableFunc(nm, func(a, b nodeMetrics) int { - return slices.Compare(a.metrics, b.metrics) - }) - sortedVector := make([]netmap.NodeInfo, len(unsortedVector)) - for i := range unsortedVector { - sortedVector[i] = unsortedVector[nm[i].index] - } - return sortedVector -} - -// Node is a descriptor of storage node with information required for intra-container communication. -type Node struct { - addresses network.AddressGroup - - externalAddresses network.AddressGroup - - key []byte -} - -// Addresses returns group of network addresses. -func (x Node) Addresses() network.AddressGroup { - return x.addresses -} - -// ExternalAddresses returns group of network addresses. -func (x Node) ExternalAddresses() network.AddressGroup { - return x.externalAddresses -} - -// PublicKey returns public key in a binary format. Should not be mutated. -func (x Node) PublicKey() []byte { - return x.key -} - -// NewNode creates new Node. -func NewNode(addresses network.AddressGroup, externalAddresses network.AddressGroup, key []byte) Node { - return Node{ - addresses: addresses, - externalAddresses: externalAddresses, - key: key, - } -} - -// Next returns next unprocessed address of the object placement. -// -// Returns nil if no nodes left or traversal operation succeeded. -func (t *Traverser) Next() []Node { - t.mtx.Lock() - defer t.mtx.Unlock() - - t.skipEmptyVectors() - - if len(t.vectors) == 0 { - return nil - } else if len(t.vectors[0]) < t.rem[0] { - return nil - } - - count := t.rem[0] - if count < 0 { - count = len(t.vectors[0]) - } - - nodes := make([]Node, count) - - for i := range count { - err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i])) - if err != nil { - return nil - } - - ext := t.vectors[0][i].ExternalAddresses() - if len(ext) > 0 { - // Ignore the error if this field is incorrectly formed. 
- _ = nodes[i].externalAddresses.FromStringSlice(ext) - } - - nodes[i].key = t.vectors[0][i].PublicKey() - } - - t.vectors[0] = t.vectors[0][count:] - - return nodes -} - -func (t *Traverser) skipEmptyVectors() { - for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body - if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 { - t.vectors = slices.Delete(t.vectors, i, i+1) - t.rem = slices.Delete(t.rem, i, i+1) - i-- - } else { - break - } - } -} - -// SubmitSuccess writes single succeeded node operation. -func (t *Traverser) SubmitSuccess() { - t.mtx.Lock() - if len(t.rem) > 0 { - t.rem[0]-- - } - t.mtx.Unlock() -} - -// Success returns true if traversal operation succeeded. -func (t *Traverser) Success() bool { - t.mtx.RLock() - defer t.mtx.RUnlock() - - for i := range t.rem { - if t.rem[i] > 0 { - return false - } - } - - return true -} - -// UseBuilder is a placement builder setting option. -// -// Overlaps UseNetworkMap option. -func UseBuilder(b Builder) Option { - return func(c *cfg) { - c.builder = b - } -} - -// ForContainer is a traversal container setting option. -func ForContainer(cnr container.Container) Option { - return func(c *cfg) { - c.policy = cnr.PlacementPolicy() - c.policySet = true - container.CalculateID(&c.cnr, cnr) - } -} - -// ForObject is a processing object setting option. -func ForObject(id oid.ID) Option { - return func(c *cfg) { - c.obj = &id - } -} - -// SuccessAfter is a flat success number setting option. -// -// Option has no effect if the number is not positive. -func SuccessAfter(v uint32) Option { - return func(c *cfg) { - if v > 0 { - c.flatSuccess = &v - } - } -} - -// ResetSuccessAfter resets flat success number setting option. -func ResetSuccessAfter() Option { - return func(c *cfg) { - c.flatSuccess = nil - } -} - -// WithoutSuccessTracking disables success tracking in traversal. -func WithoutSuccessTracking() Option { - return func(c *cfg) { - c.trackCopies = false - } -} - -func WithCopyNumbers(v []uint32) Option { - return func(c *cfg) { - c.copyNumbers = v - } -} - -// WithPriorityMetrics use provided priority metrics to sort nodes. -func WithPriorityMetrics(m []Metric) Option { - return func(c *cfg) { - c.metrics = m - } -} - -// WithNodeState provide state of the current node. 
-func WithNodeState(s NodeState) Option { - return func(c *cfg) { - c.nodeState = s - } -} diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go deleted file mode 100644 index d1370f21e..000000000 --- a/pkg/services/object_manager/placement/traverser_test.go +++ /dev/null @@ -1,653 +0,0 @@ -package placement - -import ( - "context" - "slices" - "strconv" - "testing" - - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -type testBuilder struct { - vectors [][]netmap.NodeInfo -} - -func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { - return b.vectors, nil -} - -func testNode(v uint32) (n netmap.NodeInfo) { - ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)) - n.SetNetworkEndpoints(ip) - n.SetPublicKey([]byte(ip)) - - return n -} - -func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { - vc := make([][]netmap.NodeInfo, 0, len(v)) - - for i := range v { - ns := slices.Clone(v[i]) - - vc = append(vc, ns) - } - - return vc -} - -func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) { - return placement(ss, rs, nil) -} - -func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { - return placement(ss, nil, ec) -} - -func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { - nodes := make([][]netmap.NodeInfo, 0, len(rs)) - replicas := make([]netmap.ReplicaDescriptor, 0, len(rs)) - num := uint32(0) - - for i := range ss { - ns := make([]netmap.NodeInfo, 0, ss[i]) - - for range ss[i] { - ns = append(ns, testNode(num)) - num++ - } - - nodes = append(nodes, ns) - - var rd netmap.ReplicaDescriptor - if len(rs) > 0 { - rd.SetNumberOfObjects(uint32(rs[i])) - } else { - rd.SetECDataCount(uint32(ec[i][0])) - rd.SetECParityCount(uint32(ec[i][1])) - } - - replicas = append(replicas, rd) - } - - var policy netmap.PlacementPolicy - policy.AddReplicas(replicas...) 
- - var cnr container.Container - cnr.SetPlacementPolicy(policy) - - return nodes, cnr -} - -func assertSameAddress(t *testing.T, ni netmap.NodeInfo, addr network.AddressGroup) { - var netAddr network.AddressGroup - - err := netAddr.FromIterator(netmapcore.Node(ni)) - require.NoError(t, err) - require.True(t, netAddr.Intersects(addr)) -} - -func TestTraverserObjectScenarios(t *testing.T) { - t.Run("search scenario", func(t *testing.T) { - selectors := []int{2, 3} - replicas := []int{1, 2} - - nodes, cnr := testPlacement(selectors, replicas) - - nodesCopy := copyVectors(nodes) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{vectors: nodesCopy}), - WithoutSuccessTracking(), - ) - require.NoError(t, err) - - for i := range selectors { - addrs := tr.Next() - - require.Len(t, addrs, len(nodes[i])) - - for j, n := range nodes[i] { - assertSameAddress(t, n, addrs[j].Addresses()) - } - } - - require.Empty(t, tr.Next()) - require.True(t, tr.Success()) - }) - - t.Run("read scenario", func(t *testing.T) { - selectors := []int{5, 3} - replicas := []int{2, 2} - - nodes, cnr := testPlacement(selectors, replicas) - - nodesCopy := copyVectors(nodes) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - SuccessAfter(1), - ) - require.NoError(t, err) - - for range len(nodes[0]) { - require.NotNil(t, tr.Next()) - } - - var n network.AddressGroup - - err = n.FromIterator(netmapcore.Node(nodes[1][0])) - require.NoError(t, err) - - require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next()) - }) - - t.Run("put scenario", func(t *testing.T) { - selectors := []int{5, 3} - replicas := []int{2, 2} - - nodes, cnr := testPlacement(selectors, replicas) - - nodesCopy := copyVectors(nodes) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{vectors: nodesCopy}), - ) - require.NoError(t, err) - - fn := func(curVector int) { - for i := 0; i+replicas[curVector] < selectors[curVector]; i += replicas[curVector] { - addrs := tr.Next() - require.Len(t, addrs, replicas[curVector]) - - for j := range addrs { - assertSameAddress(t, nodes[curVector][i+j], addrs[j].Addresses()) - } - } - - require.Empty(t, tr.Next()) - require.False(t, tr.Success()) - - for range replicas[curVector] { - tr.SubmitSuccess() - } - } - - for i := range selectors { - fn(i) - - if i < len(selectors)-1 { - require.False(t, tr.Success()) - } else { - require.True(t, tr.Success()) - } - } - }) - - t.Run("local operation scenario", func(t *testing.T) { - selectors := []int{2, 3} - replicas := []int{1, 2} - - nodes, cnr := testPlacement(selectors, replicas) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local) - }), - SuccessAfter(1), - ) - require.NoError(t, err) - - require.NotEmpty(t, tr.Next()) - require.False(t, tr.Success()) - - // add 1 OK - tr.SubmitSuccess() - - // nothing more to do - require.Empty(t, tr.Next()) - - // common success - require.True(t, tr.Success()) - }) -} - -func TestTraverserRemValues(t *testing.T) { - selectors := []int{3, 4, 5} - replicas := []int{2, 3, 4} - - nodes, cnr := testPlacement(selectors, replicas) - nodesCopy := copyVectors(nodes) - - testCases := [...]struct { - name string - copyNumbers []uint32 - expectedRem []int - expectedErr error - }{ - { - name: "zero copy numbers", - copyNumbers: []uint32{}, - expectedRem: 
replicas, - }, - { - name: "compatible zero copy numbers, len 1", - copyNumbers: []uint32{0}, - expectedRem: replicas, - }, - { - name: "compatible zero copy numbers, len 3", - copyNumbers: []uint32{0, 0, 0}, - expectedRem: replicas, - }, - { - name: "copy numbers for all replicas", - copyNumbers: []uint32{1, 1, 1}, - expectedRem: []int{1, 1, 1}, - }, - { - name: "single copy numbers for multiple replicas", - copyNumbers: []uint32{1}, - expectedRem: []int{1}, // may be a bit unexpected - }, - { - name: "multiple copy numbers for multiple replicas", - copyNumbers: []uint32{1, 1, 4}, - expectedRem: []int{1, 1, 4}, - }, - { - name: "incompatible copies number vector", - copyNumbers: []uint32{1, 1}, - expectedErr: errCopiesNumberLen, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{vectors: nodesCopy}), - WithCopyNumbers(testCase.copyNumbers), - ) - if testCase.expectedErr == nil { - require.NoError(t, err, testCase.name) - require.Equal(t, testCase.expectedRem, tr.rem, testCase.name) - } else { - require.Error(t, err, testCase.expectedErr, testCase.name) - } - }) - } -} - -type nodeState struct { - node *netmap.NodeInfo -} - -func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo { - return n.node -} - -func TestTraverserPriorityMetrics(t *testing.T) { - t.Run("one rep one metric", func(t *testing.T) { - selectors := []int{4} - replicas := []int{3} - - nodes, cnr := testPlacement(selectors, replicas) - - // Node_0, PK - ip4/0.0.0.0/tcp/0 - nodes[0][0].SetAttribute("ClusterName", "A") - // Node_1, PK - ip4/0.0.0.0/tcp/1 - nodes[0][1].SetAttribute("ClusterName", "A") - // Node_2, PK - ip4/0.0.0.0/tcp/2 - nodes[0][2].SetAttribute("ClusterName", "B") - // Node_3, PK - ip4/0.0.0.0/tcp/3 - nodes[0][3].SetAttribute("ClusterName", "B") - - sdkNode := testNode(5) - sdkNode.SetAttribute("ClusterName", "B") - - nodesCopy := copyVectors(nodes) - - m := []Metric{NewAttributeMetric("ClusterName")} - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - // Without priority metric `ClusterName` the order will be: - // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] - // With priority metric `ClusterName` and current node in cluster B - // the order should be: - // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}] - next := tr.Next() - require.NotNil(t, next) - require.Equal(t, 3, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey())) - - next = tr.Next() - // The last node is - require.Equal(t, 1, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - }) - - t.Run("one rep one metric fewer nodes", func(t *testing.T) { - selectors := []int{2} - replicas := []int{3} - - nodes, cnr := testPlacement(selectors, replicas) - - // Node_0, PK - ip4/0.0.0.0/tcp/0 - nodes[0][0].SetAttribute("ClusterName", "A") - // Node_1, PK - ip4/0.0.0.0/tcp/1 - nodes[0][1].SetAttribute("ClusterName", "B") - - sdkNode := testNode(5) - sdkNode.SetAttribute("ClusterName", "B") - - nodesCopy := copyVectors(nodes) - - m := 
[]Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 B} ]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_1 B}, {Node_0 A} ]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("two reps two metrics", func(t *testing.T) {
- selectors := []int{3, 3}
- replicas := []int{2, 2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // REPLICA #1
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- nodes[0][0].SetAttribute("UN-LOCODE", "RU LED")
-
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL")
-
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "A")
- nodes[0][2].SetAttribute("UN-LOCODE", "RU LED")
-
- // REPLICA #2
- // Node_3, PK - ip4/0.0.0.0/tcp/3
- nodes[1][0].SetAttribute("ClusterName", "B")
- nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW")
-
- // Node_4, PK - ip4/0.0.0.0/tcp/4
- nodes[1][1].SetAttribute("ClusterName", "B")
- nodes[1][1].SetAttribute("UN-LOCODE", "RU DME")
-
- // Node_5, PK - ip4/0.0.0.0/tcp/5
- nodes[1][2].SetAttribute("ClusterName", "B")
- nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW")
-
- sdkNode := testNode(9)
- sdkNode.SetAttribute("ClusterName", "B")
- sdkNode.SetAttribute("UN-LOCODE", "RU DME")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{
- NewAttributeMetric("ClusterName"),
- NewAttributeMetric("UN-LOCODE"),
- }
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Check that nodes in the same cluster and
- // in the same location come first in the slice.
- // Nodes that match the criteria but do not fit into the replica
- // should be in the next slice. 
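- //
- // Concretely, with the current node at {ClusterName: B, UN-LOCODE: RU DME},
- // the first batch is expected to be [Node_4 {B, RU DME}, Node_3 {B, RU MOW},
- // Node_0 {A, RU LED}, Node_1 {A, FI HEL}], and the second batch [Node_2, Node_5].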
- - next := tr.Next() - require.Equal(t, 4, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) - - next = tr.Next() - require.Equal(t, 2, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - - sdkNode.SetAttribute("ClusterName", "B") - sdkNode.SetAttribute("UN-LOCODE", "RU MOW") - - nodesCopy = copyVectors(nodes) - - tr, err = NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - next = tr.Next() - require.Equal(t, 4, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) - - next = tr.Next() - require.Equal(t, 2, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - - sdkNode.SetAttribute("ClusterName", "A") - sdkNode.SetAttribute("UN-LOCODE", "RU LED") - - nodesCopy = copyVectors(nodes) - - tr, err = NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - next = tr.Next() - require.Equal(t, 4, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey())) - - next = tr.Next() - require.Equal(t, 2, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - }) - - t.Run("ec container", func(t *testing.T) { - selectors := []int{4} - ec := [][]int{{2, 1}} - - nodes, cnr := testECPlacement(selectors, ec) - - // Node_0, PK - ip4/0.0.0.0/tcp/0 - nodes[0][0].SetAttribute("ClusterName", "A") - // Node_1, PK - ip4/0.0.0.0/tcp/1 - nodes[0][1].SetAttribute("ClusterName", "A") - // Node_2, PK - ip4/0.0.0.0/tcp/2 - nodes[0][2].SetAttribute("ClusterName", "B") - // Node_3, PK - ip4/0.0.0.0/tcp/3 - nodes[0][3].SetAttribute("ClusterName", "B") - - sdkNode := testNode(5) - sdkNode.SetAttribute("ClusterName", "B") - - nodesCopy := copyVectors(nodes) - - m := []Metric{NewAttributeMetric("ClusterName")} - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - // Without priority metric `ClusterName` the order will be: - // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] - // With priority metric `ClusterName` 
and current node in cluster B
- // the order should be:
- // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 3, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
-
- next = tr.Next()
- // The last node is Node_3 from cluster B.
- require.Equal(t, 1, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("one rep one geo metric", func(t *testing.T) {
- t.Skip()
- selectors := []int{2}
- replicas := []int{2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
-
- sdkNode := testNode(2)
- sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
-
- nodesCopy := copyVectors(nodes)
-
- parser, err := NewMetricsParser("/path/to/locode_db")
- require.NoError(t, err)
- m, err := parser.ParseMetrics([]string{geoDistance})
- require.NoError(t, err)
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `$geoDistance` the order will be:
- // [ {Node_0 RU MOW}, {Node_1 RU LED}]
- // With priority metric `$geoDistance` the order should be:
- // [ {Node_1 RU LED}, {Node_0 RU MOW}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
deleted file mode 100644
index e5f001d5a..000000000
--- a/pkg/services/object_manager/tombstone/checker.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package tombstone
-
-import (
- "context"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- lru "github.com/hashicorp/golang-lru/v2"
- "go.uber.org/zap"
-)
-
-// Source is a tombstone source interface.
-type Source interface {
- // Tombstone must return the tombstone from the source it was
- // configured to fetch from, and any error that appeared during
- // the fetching process.
- //
- // Tombstone MUST return (nil, nil) if the requested tombstone is
- // missing in the storage for the provided epoch.
- Tombstone(ctx context.Context, a oid.Address, epoch uint64) (*objectSDK.Object, error)
-}
-
-// ExpirationChecker is a tombstone source wrapper.
-// It checks tombstone presence via the tombstone
-// source, caches the results and checks their expiration.
-//
-// Must be created via the NewChecker function. `var` and
-// `ExpirationChecker{}` declarations lead to undefined behaviour
-// and may cause panics. 
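-//
-// A minimal usage sketch (the Source implementation `src` and the `ctx`,
-// `addr`, `epoch` values are assumed here, they are not part of this file):
-//
-//	checker := tombstone.NewChecker(
-//		tombstone.WithTombstoneSource(src), // required, NewChecker panics without it
-//		tombstone.WithCacheSize(1000),
-//	)
-//	available := checker.IsTombstoneAvailable(ctx, addr, epoch)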
-type ExpirationChecker struct { - cache *lru.Cache[string, uint64] - - log *logger.Logger - - tsSource Source -} - -// IsTombstoneAvailable checks the tombstone presence in the system in the -// following order: -// - 1. Local LRU cache; -// - 2. Tombstone source. -// -// If a tombstone was successfully fetched (regardless of its expiration) -// it is cached in the LRU cache. -func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Address, epoch uint64) bool { - addrStr := a.EncodeToString() - log := g.log.With(zap.String("address", addrStr)) - - expEpoch, ok := g.cache.Get(addrStr) - if ok { - return expEpoch > epoch - } - - ts, err := g.tsSource.Tombstone(ctx, a, epoch) - if err != nil { - log.Warn(ctx, - logs.TombstoneCouldNotGetTheTombstoneTheSource, - zap.Error(err), - ) - } else if ts != nil { - return g.handleTS(ctx, addrStr, ts, epoch) - } - - // requested tombstone not - // found in the FrostFS network - return false -} - -func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool { - for _, atr := range ts.Attributes() { - if atr.Key() == objectV2.SysAttributeExpEpoch { - epoch, err := strconv.ParseUint(atr.Value(), 10, 64) - if err != nil { - g.log.Warn(ctx, - logs.TombstoneExpirationParseFailure, - zap.Error(err), - ) - - return false - } - - g.cache.Add(addr, epoch) - return epoch >= reqEpoch - } - } - - // unexpected tombstone without expiration epoch - return false -} diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go deleted file mode 100644 index 2147a32fe..000000000 --- a/pkg/services/object_manager/tombstone/constructor.go +++ /dev/null @@ -1,84 +0,0 @@ -package tombstone - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - lru "github.com/hashicorp/golang-lru/v2" - "go.uber.org/zap" -) - -const defaultLRUCacheSize = 100 - -type cfg struct { - log *logger.Logger - - cacheSize int - - tsSource Source -} - -// Option is an option of ExpirationChecker's constructor. -type Option func(*cfg) - -func defaultCfg() *cfg { - return &cfg{ - log: logger.NewLoggerWrapper(zap.NewNop()), - cacheSize: defaultLRUCacheSize, - } -} - -// NewChecker creates, initializes and returns tombstone ExpirationChecker. -// The returned structure is ready to use. -// -// Panics if any of the provided options does not allow -// constructing a valid tombstone ExpirationChecker. -func NewChecker(oo ...Option) *ExpirationChecker { - cfg := defaultCfg() - - for _, o := range oo { - o(cfg) - } - - panicOnNil := func(v any, name string) { - if v == nil { - panic(fmt.Sprintf("tombstone getter constructor: %s is nil", name)) - } - } - - panicOnNil(cfg.tsSource, "Tombstone source") - - cache, err := lru.New[string, uint64](cfg.cacheSize) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) - - return &ExpirationChecker{ - cache: cache, - log: cfg.log, - tsSource: cfg.tsSource, - } -} - -// WithLogger returns an option to specify -// logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} - -// WithCacheSize returns an option to specify -// LRU cache size. -func WithCacheSize(v int) Option { - return func(c *cfg) { - c.cacheSize = v - } -} - -// WithTombstoneSource returns an option -// to specify tombstone source. 
-func WithTombstoneSource(v Source) Option {
- return func(c *cfg) {
- c.tsSource = v
- }
-}
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
deleted file mode 100644
index 975941847..000000000
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package tsourse
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// Source is a wrapper over the object service that
-// allows checking if a tombstone is available in the
-// FrostFS network.
-//
-// Must be created via the NewSource function. `var` and `Source{}`
-// declarations lead to undefined behaviour and may cause panics.
-type Source struct {
- s *getsvc.Service
-}
-
-// TombstoneSourcePrm groups required parameters for Source creation.
-type TombstoneSourcePrm struct {
- s *getsvc.Service
-}
-
-// SetGetService sets the object service.
-func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
- s.s = v
-}
-
-// NewSource creates, initializes and returns a local tombstone Source.
-// The returned structure is ready to use.
-//
-// Panics if any of the provided options does not allow
-// constructing a valid tombstone local Source.
-func NewSource(p TombstoneSourcePrm) Source {
- assert.False(p.s == nil, "Tombstone source: nil object service")
-
- return Source(p)
-}
-
-type headerWriter struct {
- o *objectSDK.Object
-}
-
-func (h *headerWriter) WriteHeader(_ context.Context, o *objectSDK.Object) error {
- h.o = o
- return nil
-}
-
-// Tombstone checks if the engine stores the tombstone.
-// Returns nil, nil if the tombstone has been removed
-// or marked for removal. 
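-//
-// A sketch of typical construction (the `getSvc` *getsvc.Service value is
-// assumed to be created elsewhere):
-//
-//	var prm tsourse.TombstoneSourcePrm
-//	prm.SetGetService(getSvc)
-//	src := tsourse.NewSource(prm)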
-func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*objectSDK.Object, error) { - var hr headerWriter - - var headPrm getsvc.HeadPrm - headPrm.WithAddress(a) - headPrm.SetHeaderWriter(&hr) - headPrm.SetCommonParameters(&util.CommonPrm{}) // default values are ok for that operation - - err := s.s.Head(ctx, headPrm) - switch { - case client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err): - return nil, nil - case err != nil: - return nil, fmt.Errorf("could not get tombstone from the source: %w", err) - default: - } - - if hr.o.Type() != objectSDK.TypeTombstone { - return nil, fmt.Errorf("returned %s object is not a tombstone", a) - } - - return hr.o, nil -} diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go deleted file mode 100644 index dcaaec0b4..000000000 --- a/pkg/services/policer/check.go +++ /dev/null @@ -1,213 +0,0 @@ -package policer - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error { - ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes( - attribute.String("address", objInfo.Address.String()), - attribute.Bool("is_linking_object", objInfo.IsLinkingObject), - attribute.Bool("is_ec_part", objInfo.ECInfo != nil), - attribute.String("type", objInfo.Type.String()), - )) - defer span.End() - - cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container()) - if err != nil { - if client.IsErrContainerNotFound(err) { - existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container()) - if errWasRemoved != nil { - return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved) - } else if existed { - err := p.buryFn(ctx, objInfo.Address) - if err != nil { - return fmt.Errorf("%s: %w", logs.PolicerCouldNotInhumeObjectWithMissingContainer, err) - } - } - } - - return fmt.Errorf("%s: %w", logs.PolicerCouldNotGetContainer, err) - } - - policy := cnr.Value.PlacementPolicy() - - if policycore.IsECPlacement(policy) { - return p.processECContainerObject(ctx, objInfo, cnr.Value) - } - return p.processRepContainerObject(ctx, objInfo, policy) -} - -func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { - idObj := objInfo.Address.Object() - idCnr := objInfo.Address.Container() - nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy) - if err != nil { - return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) - } - - c := &placementRequirements{} - - // cached info about already checked nodes - checkedNodes := newNodeCache() - - for i := range nn { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - 
shortage := policy.ReplicaDescriptor(i).NumberOfObjects() - if objInfo.Type == objectSDK.TypeLock || objInfo.Type == objectSDK.TypeTombstone || objInfo.IsLinkingObject { - // all nodes of a container must store the `LOCK`, `TOMBSTONE` and linking objects - // for correct object removal protection: - // - `LOCK`, `TOMBSTONE` and linking objects are broadcast on their PUT requests; - // - `LOCK` object removal is a prohibited action in the GC. - shortage = uint32(len(nn[i])) - } - - p.processRepNodes(ctx, c, objInfo, nn[i], shortage, checkedNodes) - } - - if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, - zap.Stringer("object", objInfo.Address), - ) - - p.cbRedundantCopy(ctx, objInfo.Address) - } - return nil -} - -type placementRequirements struct { - // needLocalCopy is true if the current node must store an object according to the storage policy. - needLocalCopy bool - // removeLocalCopy is true if all copies are stored according to the storage policy - // and the current node doesn't need to store an object. - removeLocalCopy bool -} - -func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRequirements, objInfo objectcore.Info, - nodes []netmap.NodeInfo, shortage uint32, checkedNodes nodeCache, -) { - addr := objInfo.Address - - // Number of copies that are stored on maintenance nodes. - var uncheckedCopies int - var candidates []netmap.NodeInfo - for i := 0; shortage > 0 && i < len(nodes); i++ { - select { - case <-ctx.Done(): - return - default: - } - - var err error - st := checkedNodes.processStatus(nodes[i]) - if !st.Processed() { - st, err = p.checkStatus(ctx, addr, nodes[i]) - checkedNodes.set(nodes[i], st) - if st == nodeDoesNotHoldObject { - // 1. This is the first time the node is encountered (`!st.Processed()`). - // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). - // So we need to try to put an object to it. 
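- // The accumulated candidates are handed to handleProcessNodesResult
- // below, which schedules a replication task for them only if `shortage`
- // is still positive after this loop.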
- candidates = append(candidates, nodes[i]) - continue - } - } - - switch st { - case nodeIsLocal: - requirements.needLocalCopy = true - - shortage-- - case nodeIsUnderMaintenance: - shortage-- - uncheckedCopies++ - - p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, - zap.String("node", netmap.StringifyPublicKey(nodes[i]))) - case nodeHoldsObject: - shortage-- - case nodeDoesNotHoldObject: - case nodeStatusUnknown: - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, - zap.Stringer("object", addr), - zap.Error(err)) - default: - panic("unreachable") - } - } - - p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies) -} - -func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { - if p.netmapKeys.IsLocalKey(node.PublicKey()) { - return nodeIsLocal, nil - } - if node.Status().IsMaintenance() { - return nodeIsUnderMaintenance, nil - } - - callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) - _, err := p.remoteHeader(callCtx, node, addr, false) - cancel() - - if err == nil { - return nodeHoldsObject, nil - } - if client.IsErrObjectNotFound(err) { - return nodeDoesNotHoldObject, nil - } - if client.IsErrNodeUnderMaintenance(err) { - return nodeIsUnderMaintenance, nil - } - return nodeStatusUnknown, err -} - -func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, - nodes []netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int, -) { - switch { - case shortage > 0: - p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, - zap.Stringer("object", addr), - zap.Uint32("shortage", shortage), - ) - - task := replicator.Task{ - NumCopies: shortage, - Addr: addr, - Nodes: nodes, - } - - p.replicator.HandleReplicationTask(ctx, task, checkedNodes) - - case uncheckedCopies > 0: - // If we have more copies than needed, but some of them are from the maintenance nodes, - // save the local copy. - p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, - zap.Int("count", uncheckedCopies)) - - case uncheckedCopies == 0: - // Safe to remove: checked all copies, shortage == 0. 
- requirements.removeLocalCopy = true - } -} diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go deleted file mode 100644 index 69879c439..000000000 --- a/pkg/services/policer/check_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package policer - -import ( - "testing" - - netmaptest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap/test" - "github.com/stretchr/testify/require" -) - -func TestNodeCache(t *testing.T) { - cache := newNodeCache() - node := netmaptest.NodeInfo() - - require.Equal(t, cache.processStatus(node), nodeNotProcessed) - - cache.SubmitSuccessfulReplication(node) - require.Equal(t, cache.processStatus(node), nodeHoldsObject) - - cache.set(node, nodeDoesNotHoldObject) - require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject) - - cache.set(node, nodeHoldsObject) - require.Equal(t, cache.processStatus(node), nodeHoldsObject) -} diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go deleted file mode 100644 index fbdeb3148..000000000 --- a/pkg/services/policer/ec.go +++ /dev/null @@ -1,395 +0,0 @@ -package policer - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -var errNoECinfoReturnded = errors.New("no EC info returned") - -type ecChunkProcessResult struct { - validPlacement bool - removeLocal bool -} - -var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node") - -func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { - if objInfo.ECInfo == nil { - return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy()) - } - return p.processECContainerECObject(ctx, objInfo, cnr) -} - -// processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects. -// All of them must be stored on all of the container nodes. 
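-//
-// Concretely (see the body below), the placement for such an object is the
-// single node vector nn[0], and processRepNodes is invoked with shortage set
-// to len(nn[0]), i.e. every node of the vector is required to hold a copy.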
-func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { - objID := objInfo.Address.Object() - nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy) - if err != nil { - return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) - } - if len(nn) != 1 || len(nn[0]) == 0 { - return errInvalidECPlacement - } - - c := &placementRequirements{} - checkedNodes := newNodeCache() - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes) - - if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, - zap.Stringer("object", objInfo.Address), - ) - - p.cbRedundantCopy(ctx, objInfo.Address) - } - return nil -} - -func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { - nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) - if err != nil { - return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) - } - if len(nn) != 1 || len(nn[0]) == 0 { - return errInvalidECPlacement - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - res := p.processECChunk(ctx, objInfo, nn[0]) - if !res.validPlacement { - // drop local chunk only if all required chunks are in place - res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr) - } - p.adjustECPlacement(ctx, objInfo, nn[0], cnr) - - if res.removeLocal { - p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) - p.cbRedundantCopy(ctx, objInfo.Address) - } - return nil -} - -// processECChunk replicates EC chunk if needed. 
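-//
-// The required node for a chunk is chosen deterministically as
-//
-//	requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
-//
-// If the local node is the required one, the placement is already valid;
-// otherwise the chunk is replicated to the required node, and the local copy
-// is reported as removable only once the required node confirms it holds the
-// chunk.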
-func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
- var removeLocalChunk bool
- requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
- // the current node is the required node, nothing more to do
- return ecChunkProcessResult{
- validPlacement: true,
- }
- }
- if requiredNode.Status().IsMaintenance() {
- // assume a node under maintenance still holds the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
- return ecChunkProcessResult{}
- }
-
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
- _, err := p.remoteHeader(callCtx, requiredNode, objInfo.Address, false)
- cancel()
-
- if err == nil {
- removeLocalChunk = true
- } else if client.IsErrObjectNotFound(err) {
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
- task := replicator.Task{
- NumCopies: 1,
- Addr: objInfo.Address,
- Nodes: []netmap.NodeInfo{requiredNode},
- }
- p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
- } else if client.IsErrNodeUnderMaintenance(err) {
- // assume a node under maintenance still holds the object, but do not drop the local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
- } else {
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
- }
-
- return ecChunkProcessResult{
- removeLocal: removeLocalChunk,
- }
-}
-
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
- var parentAddress oid.Address
- parentAddress.SetContainer(objInfo.Address.Container())
- parentAddress.SetObject(objInfo.ECInfo.ParentID)
-
- requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
- if len(requiredChunkIndexes) == 0 {
- p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
- return true
- }
-
- err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
- if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
- return false
- }
- if len(requiredChunkIndexes) == 0 {
- return true
- }
-
- indexToObjectID := make(map[uint32]oid.ID)
- success := p.resolveRemoteECChunks(ctx, parentAddress, nodes, requiredChunkIndexes, indexToObjectID)
- if !success {
- return false
- }
-
- for index, candidates := range requiredChunkIndexes {
- var addr oid.Address
- addr.SetContainer(objInfo.Address.Container())
- addr.SetObject(indexToObjectID[index])
- p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
- Container: cnr,
- })
- }
- // some chunks were missing and pull tasks were scheduled, so the placement is not valid yet
- return false
-}
-
-func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objectcore.Info) map[uint32][]netmap.NodeInfo {
- requiredChunkIndexes := make(map[uint32][]netmap.NodeInfo)
- for i, n := range nodes {
- if uint32(i) == objInfo.ECInfo.Total {
- break
- }
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
- }
- }
- return requiredChunkIndexes
-}
-
-func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Address, required 
map[uint32][]netmap.NodeInfo) error {
- _, err := p.localHeader(ctx, parentAddress)
- var eiErr *objectSDK.ECInfoError
- if err == nil { // should not happen
- return errNoECinfoReturnded
- }
- if !errors.As(err, &eiErr) {
- return err
- }
- for _, ch := range eiErr.ECInfo().Chunks {
- delete(required, ch.Index)
- }
- return nil
-}
-
-func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
- var eiErr *objectSDK.ECInfoError
- for _, n := range nodes {
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- continue
- }
- _, err := p.remoteHeader(ctx, n, parentAddress, true)
- if !errors.As(err, &eiErr) {
- continue
- }
- for _, ch := range eiErr.ECInfo().Chunks {
- if candidates, ok := required[ch.Index]; ok {
- candidates = append(candidates, n)
- required[ch.Index] = candidates
-
- var chunkID oid.ID
- if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
- return false
- }
- if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
- zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
- return false
- }
- indexToObjectID[ch.Index] = chunkID
- }
- }
- }
-
- for index, candidates := range required {
- if len(candidates) == 0 {
- p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
- return false
- }
- }
-
- return true
-}
-
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
- var parentAddress oid.Address
- parentAddress.SetContainer(objInfo.Address.Container())
- parentAddress.SetObject(objInfo.ECInfo.ParentID)
- var eiErr *objectSDK.ECInfoError
- resolved := make(map[uint32][]netmap.NodeInfo)
- chunkIDs := make(map[uint32]oid.ID)
- restore := true // do not restore EC chunks if some node returned error
- for idx, n := range nodes {
- if uint32(idx) >= objInfo.ECInfo.Total && uint32(len(resolved)) == objInfo.ECInfo.Total {
- return
- }
- var err error
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- _, err = p.localHeader(ctx, parentAddress)
- } else {
- _, err = p.remoteHeader(ctx, n, parentAddress, true)
- }
-
- if errors.As(err, &eiErr) {
- for _, ch := range eiErr.ECInfo().Chunks {
- resolved[ch.Index] = append(resolved[ch.Index], n)
- var ecInfoChunkID oid.ID
- if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
- return
- }
- if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
- zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
- return
- }
- chunkIDs[ch.Index] = ecInfoChunkID
- }
- } else if client.IsErrObjectAlreadyRemoved(err) {
- restore = false
- } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
- p.replicator.HandleReplicationTask(ctx, 
replicator.Task{ - NumCopies: 1, - Addr: objInfo.Address, - Nodes: []netmap.NodeInfo{n}, - }, newNodeCache()) - restore = false - } - } - if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total { - return - } - if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() { - var found []uint32 - for i := range resolved { - found = append(found, i) - } - p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) - return - } - p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr) -} - -func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, - cnr containerSDK.Container, -) { - c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount())) - if err != nil { - p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) - return - } - parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs) - if parts == nil { - return - } - key, err := p.keyStorage.GetKey(nil) - if err != nil { - p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) - return - } - required := make([]bool, len(parts)) - for i, p := range parts { - if p == nil { - required[i] = true - } - } - if err := c.ReconstructParts(parts, required, key); err != nil { - p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) - return - } - for idx, part := range parts { - if _, exists := existedChunks[uint32(idx)]; exists { - continue - } - var addr oid.Address - addr.SetContainer(parentAddress.Container()) - pID, _ := part.ID() - addr.SetObject(pID) - targetNode := nodes[idx%len(nodes)] - if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { - p.replicator.HandleLocalPutTask(ctx, replicator.Task{ - Addr: addr, - Obj: part, - Container: cnr, - }) - } else { - p.replicator.HandleReplicationTask(ctx, replicator.Task{ - NumCopies: 1, - Addr: addr, - Nodes: []netmap.NodeInfo{targetNode}, - Obj: part, - }, newNodeCache()) - } - } -} - -func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.Info, existedChunks map[uint32][]netmap.NodeInfo, parentAddress oid.Address, chunkIDs map[uint32]oid.ID) []*objectSDK.Object { - parts := make([]*objectSDK.Object, objInfo.ECInfo.Total) - errGroup, egCtx := errgroup.WithContext(ctx) - for idx, nodes := range existedChunks { - errGroup.Go(func() error { - var objID oid.Address - objID.SetContainer(parentAddress.Container()) - objID.SetObject(chunkIDs[idx]) - var obj *objectSDK.Object - var err error - for _, node := range nodes { - if p.netmapKeys.IsLocalKey(node.PublicKey()) { - obj, err = p.localObject(egCtx, objID) - } else { - obj, err = p.remoteObject(egCtx, node, objID) - } - if err == nil { - break - } - p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey()))) - } - if obj != nil { - parts[idx] = obj - } - return nil - }) - } - if err := errGroup.Wait(); err != nil { - p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), 
zap.Error(err)) - return nil - } - return parts -} diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go deleted file mode 100644 index c6980536b..000000000 --- a/pkg/services/policer/ec_test.go +++ /dev/null @@ -1,710 +0,0 @@ -package policer - -import ( - "bytes" - "context" - "crypto/rand" - "errors" - "fmt" - "sync/atomic" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestECChunkHasValidPlacement(t *testing.T) { - t.Parallel() - chunkAddress := oidtest.Address() - parentID := oidtest.ID() - - var policy netmapSDK.PlacementPolicy - require.NoError(t, policy.DecodeString("EC 2.1")) - - cnr := &container.Container{} - cnr.Value.Init() - cnr.Value.SetPlacementPolicy(policy) - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - if id.Equals(chunkAddress.Container()) { - return cnr, nil - } - return nil, new(apistatus.ContainerNotFound) - }, - } - - nodes := make([]netmapSDK.NodeInfo, 4) - for i := range nodes { - nodes[i].SetPublicKey([]byte{byte(i)}) - } - - placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) { - return [][]netmapSDK.NodeInfo{nodes}, nil - } - return nil, errors.New("unexpected placement build") - } - - remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - require.True(t, raw, "remote header for parent object must be called with raw flag") - index := int(ni.PublicKey()[0]) - require.True(t, index == 1 || index == 2, "invalid node to get parent header") - require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get remote header") - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = uint32(index) - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) { - require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get remote header") - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = uint32(0) - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - p := New( - WithContainerSource(containerSrc), - WithPlacementBuilder(placementBuilderFunc(placementBuilder)), - WithNetmapKeys(announcedKeysFunc(func(k []byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(remoteHeadFn), - WithLocalObjectHeaderFunc(localHeadFn), - WithPool(testPool(t)), - ) - - 
objInfo := objectcore.Info{ - Address: chunkAddress, - Type: objectSDK.TypeRegular, - ECInfo: &objectcore.ECInfo{ - ParentID: parentID, - Index: 0, - Total: 3, - }, - } - err := p.processObject(context.Background(), objInfo) - require.NoError(t, err) -} - -func TestECChunkHasInvalidPlacement(t *testing.T) { - t.Parallel() - chunkAddress := oidtest.Address() - parentID := oidtest.ID() - chunkObject := objectSDK.New() - chunkObject.SetContainerID(chunkAddress.Container()) - chunkObject.SetID(chunkAddress.Object()) - chunkObject.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) - chunkObject.SetPayloadSize(uint64(10)) - chunkObject.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: parentID}, 0, 3, []byte{}, 0)) - - var policy netmapSDK.PlacementPolicy - require.NoError(t, policy.DecodeString("EC 2.1")) - - cnr := &container.Container{} - cnr.Value.Init() - cnr.Value.SetPlacementPolicy(policy) - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - if id.Equals(chunkAddress.Container()) { - return cnr, nil - } - return nil, new(apistatus.ContainerNotFound) - }, - } - - nodes := make([]netmapSDK.NodeInfo, 4) - for i := range nodes { - nodes[i].SetPublicKey([]byte{byte(i)}) - } - - placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) { - return [][]netmapSDK.NodeInfo{nodes}, nil - } - return nil, errors.New("unexpected placement build") - } - - objInfo := objectcore.Info{ - Address: chunkAddress, - Type: objectSDK.TypeRegular, - ECInfo: &objectcore.ECInfo{ - ParentID: parentID, - Index: 1, - Total: 3, - }, - } - - t.Run("node0 has chunk1, node1 has chunk0 and chunk1", func(t *testing.T) { - // policer should pull chunk0 on first run and drop chunk1 on second run - var allowDrop bool - requiredChunkID := oidtest.ID() - headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw { - return chunkObject, nil - } - if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = 1 - ch.Total = 3 - ei.AddChunk(ch) - ch.Index = 0 - ch.SetID(requiredChunkID) - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = 2 - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - return nil, new(apistatus.ObjectNotFound) - } - require.Fail(t, "unexpected remote HEAD") - return nil, fmt.Errorf("unexpected remote HEAD") - } - - localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) { - require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD") - if allowDrop { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = 1 - ch.Total = 3 - ei.AddChunk(ch) - ch.SetID(requiredChunkID) - ch.Index = 0 - ei.AddChunk(ch) - 
return nil, objectSDK.NewECInfoError(ei)
- }
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var pullCounter atomic.Int64
- var dropped []oid.Address
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(headFn),
- WithLocalObjectHeaderFunc(localHeadF),
- WithReplicator(&testReplicator{
- handlePullTask: func(ctx context.Context, r replicator.Task) {
- require.True(t, r.Addr.Container() == chunkAddress.Container() && r.Addr.Object() == requiredChunkID &&
- len(r.Nodes) == 1 && bytes.Equal(r.Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid pull task")
- pullCounter.Add(1)
- },
- }),
- WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
- require.True(t, allowDrop, "invalid redundant copy call")
- dropped = append(dropped, a)
- }),
- WithPool(testPool(t)),
- )
-
- err := p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
- require.Equal(t, 0, len(dropped), "invalid dropped count")
- allowDrop = true
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
- require.Equal(t, 1, len(dropped), "invalid dropped count")
- require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
- })
-
- t.Run("node0 has chunk0 and chunk1, node1 has chunk1", func(t *testing.T) {
- // policer should drop chunk1
- headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
- return chunkObject, nil
- }
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 2
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- return nil, new(apistatus.ObjectNotFound)
- }
- require.Fail(t, "unexpected remote HEAD")
- return nil, fmt.Errorf("unexpected remote HEAD")
- }
-
- localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- ch.SetID(oidtest.ID())
- ch.Index = 0
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var dropped []oid.Address
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k 
[]byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(headFn), - WithLocalObjectHeaderFunc(localHeadF), - WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) { - dropped = append(dropped, a) - }), - WithPool(testPool(t)), - ) - - err := p.processObject(context.Background(), objInfo) - require.NoError(t, err) - require.Equal(t, 1, len(dropped), "invalid dropped count") - require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object") - }) - - t.Run("node0 has chunk0 and chunk1, node1 has no chunks", func(t *testing.T) { - // policer should replicate chunk1 to node1 on first run and drop chunk1 on node0 on second run - var secondRun bool - headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw { - if !secondRun { - return nil, new(apistatus.ObjectNotFound) - } - return chunkObject, nil - } - if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkAddress.Object()) - ch.Index = 1 - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(oidtest.ID()) - ch.Index = 2 - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() && - a.Object() == parentID && raw { - return nil, new(apistatus.ObjectNotFound) - } - require.Fail(t, "unexpected remote HEAD") - return nil, fmt.Errorf("unexpected remote HEAD") - } - - localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) { - require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD") - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkAddress.Object()) - ch.Index = 1 - ch.Total = 3 - ei.AddChunk(ch) - ch.SetID(oidtest.ID()) - ch.Index = 0 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - var dropped []oid.Address - var replicated []replicator.Task - p := New( - WithContainerSource(containerSrc), - WithPlacementBuilder(placementBuilderFunc(placementBuilder)), - WithNetmapKeys(announcedKeysFunc(func(k []byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(headFn), - WithLocalObjectHeaderFunc(localHeadF), - WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) { - dropped = append(dropped, a) - }), - WithReplicator(&testReplicator{ - handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) { - replicated = append(replicated, t) - }, - }), - WithPool(testPool(t)), - ) - - err := p.processObject(context.Background(), objInfo) - require.NoError(t, err) - require.Equal(t, 0, len(dropped), "invalid dropped count") - require.Equal(t, 1, len(replicated), "invalid replicated count") - require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object") - require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target") - - secondRun = true - err = p.processObject(context.Background(), objInfo) - 
require.NoError(t, err) - require.Equal(t, 1, len(replicated), "invalid replicated count") - require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object") - require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target") - require.Equal(t, 1, len(dropped), "invalid dropped count") - require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object") - }) -} - -func TestECChunkRestore(t *testing.T) { - // node0 has chunk0, node1 has chunk1 - // policer should replicate chunk0 to node2 on the first run - // then restore EC object and replicate chunk2 to node2 on the second run - t.Parallel() - - payload := make([]byte, 64) - rand.Read(payload) - parentAddress := oidtest.Address() - parentObject := objectSDK.New() - parentObject.SetContainerID(parentAddress.Container()) - parentObject.SetPayload(payload) - parentObject.SetPayloadSize(64) - objectSDK.CalculateAndSetPayloadChecksum(parentObject) - err := objectSDK.CalculateAndSetID(parentObject) - require.NoError(t, err) - id, _ := parentObject.ID() - parentAddress.SetObject(id) - - chunkIDs := make([]oid.ID, 3) - c, err := erasurecode.NewConstructor(2, 1) - require.NoError(t, err) - key, err := keys.NewPrivateKey() - require.NoError(t, err) - chunks, err := c.Split(parentObject, &key.PrivateKey) - require.NoError(t, err) - for i, ch := range chunks { - chunkIDs[i], _ = ch.ID() - } - - var policy netmapSDK.PlacementPolicy - require.NoError(t, policy.DecodeString("EC 2.1")) - - cnr := &container.Container{} - cnr.Value.Init() - cnr.Value.SetPlacementPolicy(policy) - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - if id.Equals(parentAddress.Container()) { - return cnr, nil - } - return nil, new(apistatus.ContainerNotFound) - }, - } - - nodes := make([]netmapSDK.NodeInfo, 4) - for i := range nodes { - nodes[i].SetPublicKey([]byte{byte(i)}) - } - - placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - if cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) { - return [][]netmapSDK.NodeInfo{nodes}, nil - } - return nil, errors.New("unexpected placement build") - } - var secondRun bool - remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - require.True(t, raw, "remote header for parent object must be called with raw flag") - index := int(ni.PublicKey()[0]) - require.True(t, index == 1 || index == 2 || index == 3, "invalid node to get parent header") - require.True(t, a == parentAddress, "invalid address to get remote header") - if index == 1 { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[1]) - ch.Index = uint32(1) - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if index == 2 && secondRun { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[0]) - ch.Index = uint32(0) - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - return nil, new(apistatus.ObjectNotFound) - } - - localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) { - require.True(t, a == parentAddress, "invalid address to get remote header") - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[0]) - ch.Index = uint32(0) - ch.Total = 3 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - var replicatedObj []*objectSDK.Object - p 
:= New( - WithContainerSource(containerSrc), - WithPlacementBuilder(placementBuilderFunc(placementBuilder)), - WithNetmapKeys(announcedKeysFunc(func(k []byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(remoteHeadFn), - WithLocalObjectHeaderFunc(localHeadFn), - WithReplicator(&testReplicator{ - handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) { - if t.Obj != nil { - replicatedObj = append(replicatedObj, t.Obj) - } - }, - }), - WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request") - return chunks[0], nil - }), - WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) { - index := ni.PublicKey()[0] - if index == 2 { - return nil, new(apistatus.ObjectNotFound) - } - return chunks[index], nil - }), - WithPool(testPool(t)), - WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)), - ) - - var chunkAddress oid.Address - chunkAddress.SetContainer(parentAddress.Container()) - chunkAddress.SetObject(chunkIDs[0]) - objInfo := objectcore.Info{ - Address: chunkAddress, - Type: objectSDK.TypeRegular, - ECInfo: &objectcore.ECInfo{ - ParentID: parentAddress.Object(), - Index: 0, - Total: 3, - }, - } - err = p.processObject(context.Background(), objInfo) - require.NoError(t, err) - secondRun = true - err = p.processObject(context.Background(), objInfo) - require.NoError(t, err) - - require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count") - chunks[2].SetSignature(nil) - expectedData, err := chunks[2].MarshalJSON() - require.NoError(t, err) - replicatedObj[0].SetSignature(nil) - actualData, err := replicatedObj[0].MarshalJSON() - require.NoError(t, err) - require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects") -} - -func TestECChunkRestoreNodeOff(t *testing.T) { - // node0 has chunk0, node1 has chunk1, node2 has chunk2, node3 is out of netmap - t.Parallel() - - payload := make([]byte, 64) - rand.Read(payload) - parentAddress := oidtest.Address() - parentObject := objectSDK.New() - parentObject.SetContainerID(parentAddress.Container()) - parentObject.SetPayload(payload) - parentObject.SetPayloadSize(64) - objectSDK.CalculateAndSetPayloadChecksum(parentObject) - err := objectSDK.CalculateAndSetID(parentObject) - require.NoError(t, err) - id, _ := parentObject.ID() - parentAddress.SetObject(id) - - chunkIDs := make([]oid.ID, 4) - c, err := erasurecode.NewConstructor(3, 1) - require.NoError(t, err) - key, err := keys.NewPrivateKey() - require.NoError(t, err) - chunks, err := c.Split(parentObject, &key.PrivateKey) - require.NoError(t, err) - for i, ch := range chunks { - chunkIDs[i], _ = ch.ID() - } - - var policy netmapSDK.PlacementPolicy - require.NoError(t, policy.DecodeString("EC 3.1")) - - cnr := &container.Container{} - cnr.Value.Init() - cnr.Value.SetPlacementPolicy(policy) - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - if id.Equals(parentAddress.Container()) { - return cnr, nil - } - return nil, new(apistatus.ContainerNotFound) - }, - } - - nodes := make([]netmapSDK.NodeInfo, 3) - for i := range nodes { - nodes[i].SetPublicKey([]byte{byte(i)}) - } - - placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - if 
cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) { - return [][]netmapSDK.NodeInfo{nodes}, nil - } - return nil, errors.New("unexpected placement build") - } - remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - require.True(t, raw, "remote header for parent object must be called with raw flag") - index := int(ni.PublicKey()[0]) - require.True(t, index == 1 || index == 2, "invalid node to get parent header") - require.True(t, a == parentAddress, "invalid address to get remote header") - if index == 1 { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[1]) - ch.Index = uint32(1) - ch.Total = 4 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - if index == 2 { - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[2]) - ch.Index = uint32(2) - ch.Total = 4 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - return nil, new(apistatus.ObjectNotFound) - } - - localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) { - require.True(t, a == parentAddress, "invalid address to get remote header") - ei := objectSDK.NewECInfo() - var ch objectSDK.ECChunk - ch.SetID(chunkIDs[0]) - ch.Index = uint32(0) - ch.Total = 4 - ei.AddChunk(ch) - return nil, objectSDK.NewECInfoError(ei) - } - - var replicatedObj []*objectSDK.Object - p := New( - WithContainerSource(containerSrc), - WithPlacementBuilder(placementBuilderFunc(placementBuilder)), - WithNetmapKeys(announcedKeysFunc(func(k []byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(remoteHeadFn), - WithLocalObjectHeaderFunc(localHeadFn), - WithReplicator(&testReplicator{ - handleLocalPutTask: func(ctx context.Context, task replicator.Task) { - if task.Obj != nil { - replicatedObj = append(replicatedObj, task.Obj) - } - }, - }), - WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request") - return chunks[0], nil - }), - WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) { - index := ni.PublicKey()[0] - return chunks[index], nil - }), - WithPool(testPool(t)), - WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)), - ) - - var chunkAddress oid.Address - chunkAddress.SetContainer(parentAddress.Container()) - chunkAddress.SetObject(chunkIDs[0]) - objInfo := objectcore.Info{ - Address: chunkAddress, - Type: objectSDK.TypeRegular, - ECInfo: &objectcore.ECInfo{ - ParentID: parentAddress.Object(), - Index: 0, - Total: 4, - }, - } - err = p.processObject(context.Background(), objInfo) - require.NoError(t, err) - - require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count") - chunks[3].SetSignature(nil) - expectedData, err := chunks[3].MarshalJSON() - require.NoError(t, err) - replicatedObj[0].SetSignature(nil) - actualData, err := replicatedObj[0].MarshalJSON() - require.NoError(t, err) - require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects") -} diff --git a/pkg/services/policer/metrics.go b/pkg/services/policer/metrics.go deleted file mode 100644 index c2ad2b0b5..000000000 --- a/pkg/services/policer/metrics.go +++ /dev/null @@ -1,9 +0,0 @@ -package policer - -type MetricsRegister interface { - IncProcessedObjects() -} - -type noopMetrics struct{} - 
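// A minimal sketch (hypothetical, not from the deleted sources): since
// MetricsRegister has a single method, any counter satisfies it, e.g. an
// in-memory implementation backed by "sync/atomic":

type countingMetrics struct{ processed atomic.Uint64 }

func (m *countingMetrics) IncProcessedObjects() { m.processed.Add(1) }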
-func (noopMetrics) IncProcessedObjects() {} diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go deleted file mode 100644 index c2157de5d..000000000 --- a/pkg/services/policer/nodecache.go +++ /dev/null @@ -1,42 +0,0 @@ -package policer - -import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - -type nodeProcessStatus int8 - -const ( - nodeNotProcessed nodeProcessStatus = iota - nodeDoesNotHoldObject - nodeHoldsObject - nodeStatusUnknown - nodeIsUnderMaintenance - nodeIsLocal -) - -func (st nodeProcessStatus) Processed() bool { - return st != nodeNotProcessed -} - -// nodeCache tracks Policer's check progress. -type nodeCache map[uint64]nodeProcessStatus - -func newNodeCache() nodeCache { - return make(map[uint64]nodeProcessStatus) -} - -func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) { - n[node.Hash()] = val -} - -// processStatus returns current processing status of the storage node. -func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { - return n[node.Hash()] -} - -// SubmitSuccessfulReplication marks given storage node as a current object -// replica holder. -// -// SubmitSuccessfulReplication implements replicator.TaskResult. -func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) { - n.set(node, nodeHoldsObject) -} diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go deleted file mode 100644 index 5d59604c2..000000000 --- a/pkg/services/policer/option.go +++ /dev/null @@ -1,214 +0,0 @@ -package policer - -import ( - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" -) - -// KeySpaceIterator is the interface that allows iterating over the key space -// of local storage. -// Note that the underlying implementation might be circular: i.e. it can restart -// when the end of the key space is reached. -type KeySpaceIterator interface { - Next(context.Context, uint32) ([]objectcore.Info, error) - Rewind() -} - -// RedundantCopyCallback is a callback to pass -// the redundant local copy of the object. -type RedundantCopyCallback func(context.Context, oid.Address) - -// BuryFunc is the function to bury (i.e. inhume) an object. -type BuryFunc func(context.Context, oid.Address) error - -// Replicator is the interface to a consumer of replication tasks. -type Replicator interface { - HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult) - HandlePullTask(ctx context.Context, task replicator.Task) - HandleLocalPutTask(ctx context.Context, task replicator.Task) -} - -// RemoteObjectHeaderFunc is the function to obtain HEAD info from a specific remote node. 
-type RemoteObjectHeaderFunc func(context.Context, netmapSDK.NodeInfo, oid.Address, bool) (*objectSDK.Object, error) - -// LocalObjectHeaderFunc is the function to obtain HEAD info from the current node. -type LocalObjectHeaderFunc func(context.Context, oid.Address) (*objectSDK.Object, error) - -type RemoteObjectGetFunc func(context.Context, netmapSDK.NodeInfo, oid.Address) (*objectSDK.Object, error) - -type LocalObjectGetFunc func(context.Context, oid.Address) (*objectSDK.Object, error) - -type cfg struct { - headTimeout time.Duration - - log *logger.Logger - - keySpaceIterator KeySpaceIterator - - buryFn BuryFunc - - cnrSrc container.Source - - placementBuilder placement.Builder - - remoteHeader RemoteObjectHeaderFunc - - localHeader LocalObjectHeaderFunc - - netmapKeys netmap.AnnouncedKeys - - replicator Replicator - - cbRedundantCopy RedundantCopyCallback - - taskPool *ants.Pool - - batchSize, cacheSize uint32 - - evictDuration, sleepDuration time.Duration - - metrics MetricsRegister - - remoteObject RemoteObjectGetFunc - - localObject LocalObjectGetFunc - - keyStorage *util.KeyStorage -} - -func defaultCfg() *cfg { - return &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - batchSize: 10, - cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB - sleepDuration: 1 * time.Second, - evictDuration: 30 * time.Second, - metrics: noopMetrics{}, - } -} - -// Option is an option for Policer constructor. -type Option func(*cfg) - -// WithHeadTimeout returns option to set Head timeout of Policer. -func WithHeadTimeout(v time.Duration) Option { - return func(c *cfg) { - c.headTimeout = v - } -} - -// WithLogger returns option to set Logger of Policer. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} - -func WithKeySpaceIterator(it KeySpaceIterator) Option { - return func(c *cfg) { - c.keySpaceIterator = it - } -} - -func WithBuryFunc(f BuryFunc) Option { - return func(c *cfg) { - c.buryFn = f - } -} - -// WithContainerSource returns option to set container source of Policer. -func WithContainerSource(v container.Source) Option { - return func(c *cfg) { - c.cnrSrc = v - } -} - -// WithPlacementBuilder returns option to set object placement builder of Policer. -func WithPlacementBuilder(v placement.Builder) Option { - return func(c *cfg) { - c.placementBuilder = v - } -} - -// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer. -func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option { - return func(c *cfg) { - c.remoteHeader = v - } -} - -// WithLocalObjectHeaderFunc returns option to set local object header receiver of Policer. -func WithLocalObjectHeaderFunc(v LocalObjectHeaderFunc) Option { - return func(c *cfg) { - c.localHeader = v - } -} - -func WithRemoteObjectGetFunc(v RemoteObjectGetFunc) Option { - return func(c *cfg) { - c.remoteObject = v - } -} - -func WithLocalObjectGetFunc(v LocalObjectGetFunc) Option { - return func(c *cfg) { - c.localObject = v - } -} - -// WithNetmapKeys returns option to set tool to work with announced public keys. -func WithNetmapKeys(v netmap.AnnouncedKeys) Option { - return func(c *cfg) { - c.netmapKeys = v - } -} - -// WithReplicator returns option to set object replicator of Policer. -func WithReplicator(v Replicator) Option { - return func(c *cfg) { - c.replicator = v - } -} - -// WithRedundantCopyCallback returns option to set -// callback to pass redundant local object copies -// detected by Policer. 
-func WithRedundantCopyCallback(cb RedundantCopyCallback) Option { - return func(c *cfg) { - c.cbRedundantCopy = cb - } -} - -// WithPool returns option to set pool for -// policy and replication operations. -func WithPool(p *ants.Pool) Option { - return func(c *cfg) { - c.taskPool = p - } -} - -// WithMetrics returns option to set metrics. -func WithMetrics(m MetricsRegister) Option { - return func(c *cfg) { - c.metrics = m - } -} - -func WithKeyStorage(ks *util.KeyStorage) Option { - return func(c *cfg) { - c.keyStorage = ks - } -} diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go deleted file mode 100644 index c91e7cc7c..000000000 --- a/pkg/services/policer/policer.go +++ /dev/null @@ -1,68 +0,0 @@ -package policer - -import ( - "fmt" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - lru "github.com/hashicorp/golang-lru/v2" -) - -type objectsInWork struct { - sync.RWMutex - objs map[oid.Address]struct{} -} - -func (oiw *objectsInWork) inWork(addr oid.Address) bool { - oiw.RLock() - _, ok := oiw.objs[addr] - oiw.RUnlock() - - return ok -} - -func (oiw *objectsInWork) remove(addr oid.Address) { - oiw.Lock() - delete(oiw.objs, addr) - oiw.Unlock() -} - -func (oiw *objectsInWork) add(addr oid.Address) bool { - oiw.Lock() - _, exists := oiw.objs[addr] - oiw.objs[addr] = struct{}{} - oiw.Unlock() - return !exists -} - -// Policer represents the utility that verifies -// compliance with the object storage policy. -type Policer struct { - *cfg - - cache *lru.Cache[oid.Address, time.Time] - - objsInWork *objectsInWork -} - -// New creates, initializes and returns Policer instance. -func New(opts ...Option) *Policer { - c := defaultCfg() - - for i := range opts { - opts[i](c) - } - - cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) - - return &Policer{ - cfg: c, - cache: cache, - objsInWork: &objectsInWork{ - objs: make(map[oid.Address]struct{}, c.taskPool.Cap()), - }, - } -} diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go deleted file mode 100644 index 049c33753..000000000 --- a/pkg/services/policer/policer_test.go +++ /dev/null @@ -1,486 +0,0 @@ -package policer - -import ( - "bytes" - "context" - "errors" - "slices" - "sort" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/panjf2000/ants/v2" - "github.com/stretchr/testify/require" -) - -func TestBuryObjectWithoutContainer(t *testing.T) { - // Key space - addr := oidtest.Address() - objs := []objectcore.Info{ - { - Address: addr, - Type: objectSDK.TypeRegular, - }, - } - - // Container source and bury function - buryCh := make(chan oid.Address) - containerSrc 
:= containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - return nil, new(apistatus.ContainerNotFound) - }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return &container.DelInfo{}, nil - }, - } - buryFn := func(ctx context.Context, a oid.Address) error { - buryCh <- a - return nil - } - - // Policer instance - p := New( - WithKeySpaceIterator(&sliceKeySpaceIterator{objs: objs}), - WithContainerSource(containerSrc), - WithBuryFunc(buryFn), - WithPool(testPool(t)), - ) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go p.Run(ctx) - - require.Equal(t, addr, <-buryCh) -} - -func TestProcessObject(t *testing.T) { - // Notes: - // - nodes are referred to by their index throughout, which is embedded in the public key - // - node with index 0 always refers to the local node, so there's no need to add it to objHolders - // - policy is used only to match the number of replicas for each index in the placement - tests := []struct { - desc string - objType objectSDK.Type - nodeCount int - policy string - placement [][]int - objHolders []int - maintenanceNodes []int - wantRemoveRedundant bool - wantReplicateTo []int - headResult map[int]error - ecInfo *objectcore.ECInfo - }{ - { - desc: "1 copy already held by local node", - nodeCount: 1, - policy: `REP 1`, - placement: [][]int{{0}}, - }, - { - desc: "1 copy already held by the remote node", - nodeCount: 2, - policy: `REP 1`, - placement: [][]int{{1}}, - objHolders: []int{1}, - wantRemoveRedundant: true, - }, - { - desc: "1 copy not yet held by the remote node", - nodeCount: 2, - policy: `REP 1`, - placement: [][]int{{1}}, - wantReplicateTo: []int{1}, - }, - { - desc: "2 copies already held by local and remote node", - nodeCount: 2, - policy: `REP 2`, - placement: [][]int{{0, 1}}, - objHolders: []int{1}, - }, - { - desc: "2 copies but not held by remote node", - nodeCount: 2, - policy: `REP 2`, - placement: [][]int{{0, 1}}, - wantReplicateTo: []int{1}, - }, - { - desc: "multiple vectors already held by remote node", - nodeCount: 2, - policy: `REP 2 REP 2`, - placement: [][]int{{0, 1}, {0, 1}}, - objHolders: []int{1}, - }, - { - desc: "multiple vectors not yet held by remote node", - nodeCount: 2, - policy: `REP 2 REP 2`, - placement: [][]int{{0, 1}, {0, 1}}, - wantReplicateTo: []int{1}, - }, - { - desc: "lock object must be replicated to all nodes", - objType: objectSDK.TypeLock, - nodeCount: 3, - policy: `REP 1`, - placement: [][]int{{0, 1, 2}}, - wantReplicateTo: []int{1, 2}, - }, - { - desc: "preserve local copy when maintenance nodes exist", - nodeCount: 3, - policy: `REP 2`, - placement: [][]int{{1, 2}}, - objHolders: []int{1}, - maintenanceNodes: []int{2}, - }, - { - desc: "preserve local copy when node response with MAINTENANCE", - nodeCount: 3, - policy: `REP 2`, - placement: [][]int{{1, 2}}, - objHolders: []int{1}, - headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)}, - }, - { - desc: "lock object must be replicated to all EC nodes", - objType: objectSDK.TypeLock, - nodeCount: 3, - policy: `EC 1.1`, - placement: [][]int{{0, 1, 2}}, - wantReplicateTo: []int{1, 2}, - }, - { - desc: "tombstone object must be replicated to all EC nodes", - objType: objectSDK.TypeTombstone, - nodeCount: 3, - policy: `EC 1.1`, - placement: [][]int{{0, 1, 2}}, - wantReplicateTo: []int{1, 2}, - }, - { - desc: "do not remove local copy when MAINTENANCE status is cached", - objType: objectSDK.TypeRegular, - nodeCount: 3, - policy: `REP 1 REP 1`, - 
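// headResult maps a node index to the error returned by that node's stubbed
// remote HEAD call, so a single case can simulate per-node MAINTENANCE or
// not-found responses without extra fixtures.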
placement: [][]int{{1, 2}, {1, 0}}, - headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)}, - }, - } - - for i := range tests { - ti := tests[i] - t.Run(ti.desc, func(t *testing.T) { - addr := oidtest.Address() - - // Netmap, placement policy and placement builder - nodes := make([]netmap.NodeInfo, ti.nodeCount) - for i := range nodes { - nodes[i].SetPublicKey([]byte{byte(i)}) - } - for _, i := range ti.maintenanceNodes { - nodes[i].SetStatus(netmap.Maintenance) - } - - var policy netmap.PlacementPolicy - require.NoError(t, policy.DecodeString(ti.policy)) - - placementVectors := make([][]netmap.NodeInfo, len(ti.placement)) - for i, pv := range ti.placement { - for _, nj := range pv { - placementVectors[i] = append(placementVectors[i], nodes[nj]) - } - } - placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { - if cnr.Equals(addr.Container()) && obj != nil && obj.Equals(addr.Object()) { - return placementVectors, nil - } - if ti.ecInfo != nil && cnr.Equals(addr.Container()) && obj != nil && obj.Equals(ti.ecInfo.ParentID) { - return placementVectors, nil - } - t.Errorf("unexpected placement build: cid=%v oid=%v", cnr, obj) - return nil, errors.New("unexpected placement build") - } - - // Object remote header - headFn := func(_ context.Context, ni netmap.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) { - index := int(ni.PublicKey()[0]) - if a != addr || index < 1 || index >= ti.nodeCount { - t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a) - return nil, errors.New("unexpected object head") - } - if ti.headResult != nil { - if err, ok := ti.headResult[index]; ok { - return nil, err - } - } - if slices.Contains(ti.objHolders, index) { - return nil, nil - } - return nil, new(apistatus.ObjectNotFound) - } - - // Container source - cnr := &container.Container{} - cnr.Value.Init() - cnr.Value.SetPlacementPolicy(policy) - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - if id.Equals(addr.Container()) { - return cnr, nil - } - t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container()) - return nil, new(apistatus.ContainerNotFound) - }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return &container.DelInfo{}, nil - }, - } - buryFn := func(ctx context.Context, a oid.Address) error { - t.Errorf("unexpected object buried: %v", a) - return nil - } - - // Policer instance - var gotRemoveRedundant bool - var gotReplicateTo []int - - p := New( - WithContainerSource(containerSrc), - WithPlacementBuilder(placementBuilderFunc(placementBuilder)), - WithNetmapKeys(announcedKeysFunc(func(k []byte) bool { - return bytes.Equal(k, nodes[0].PublicKey()) - })), - WithRemoteObjectHeaderFunc(headFn), - WithBuryFunc(buryFn), - WithRedundantCopyCallback(func(_ context.Context, a oid.Address) { - require.True(t, a.Equals(addr), "unexpected redundant copy callback: a=%v", a) - gotRemoveRedundant = true - }), - WithReplicator(&testReplicator{ - handleReplicationTask: func(_ context.Context, task replicator.Task, res replicator.TaskResult) { - require.True(t, task.Addr.Equals(addr), "unexpected replicator task: %+v", task) - for _, node := range task.Nodes { - gotReplicateTo = append(gotReplicateTo, int(node.PublicKey()[0])) - } - }, - }), - WithPool(testPool(t)), - ) - - addrWithType := objectcore.Info{ - Address: addr, - Type: ti.objType, - ECInfo: ti.ecInfo, - } - - err := 
p.processObject(context.Background(), addrWithType) - require.NoError(t, err) - sort.Ints(gotReplicateTo) - - require.Equal(t, ti.wantRemoveRedundant, gotRemoveRedundant) - require.Equal(t, ti.wantReplicateTo, gotReplicateTo) - }) - } -} - -func TestProcessObjectError(t *testing.T) { - addr := oidtest.Address() - // Container source - cnr := &container.Container{} - cnr.Value.Init() - source := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - return nil, new(apistatus.ContainerNotFound) - }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return nil, new(apistatus.ContainerNotFound) - }, - } - buryFn := func(ctx context.Context, a oid.Address) error { - t.Errorf("unexpected object buried: %v", a) - return nil - } - p := New( - WithContainerSource(source), - WithBuryFunc(buryFn), - WithPool(testPool(t)), - ) - - addrWithType := objectcore.Info{ - Address: addr, - } - - require.True(t, client.IsErrContainerNotFound(p.processObject(context.Background(), addrWithType))) -} - -func TestIteratorContract(t *testing.T) { - addr := oidtest.Address() - objs := []objectcore.Info{{ - Address: addr, - Type: objectSDK.TypeRegular, - }} - - buryFn := func(ctx context.Context, a oid.Address) error { - return nil - } - - it := &predefinedIterator{ - scenario: []nextResult{ - {objs, nil}, - {nil, errors.New("opaque")}, - {nil, engine.ErrEndOfListing}, - {nil, engine.ErrEndOfListing}, - {nil, errors.New("opaque")}, - {objs, engine.ErrEndOfListing}, - }, - finishCh: make(chan struct{}), - } - - containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { - return nil, new(apistatus.ContainerNotFound) - }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return &container.DelInfo{}, nil - }, - } - - p := New( - WithKeySpaceIterator(it), - WithContainerSource(containerSrc), - WithBuryFunc(buryFn), - WithPool(testPool(t)), - func(c *cfg) { - c.sleepDuration = time.Millisecond - }, - ) - - ctx, cancel := context.WithCancel(context.Background()) - go p.Run(ctx) - - <-it.finishCh - cancel() - require.Equal(t, []string{ - "Next", - "Next", - "Next", - "Rewind", - "Next", - "Rewind", - "Next", - "Next", - "Rewind", - }, it.calls) -} - -func testPool(t *testing.T) *ants.Pool { - pool, err := ants.NewPool(4) - require.NoError(t, err) - return pool -} - -type nextResult struct { - objs []objectcore.Info - err error -} - -type predefinedIterator struct { - scenario []nextResult - finishCh chan struct{} - pos int - calls []string -} - -func (it *predefinedIterator) Next(ctx context.Context, size uint32) ([]objectcore.Info, error) { - if it.pos == len(it.scenario) { - close(it.finishCh) - <-ctx.Done() - return nil, nil - } - - res := it.scenario[it.pos] - it.pos += 1 - it.calls = append(it.calls, "Next") - return res.objs, res.err -} - -func (it *predefinedIterator) Rewind() { - it.calls = append(it.calls, "Rewind") -} - -// sliceKeySpaceIterator is a KeySpaceIterator backed by a slice. 
-type sliceKeySpaceIterator struct { - objs []objectcore.Info - cur int -} - -func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectcore.Info, error) { - if it.cur >= len(it.objs) { - return nil, engine.ErrEndOfListing - } - end := min(it.cur+int(size), len(it.objs)) - ret := it.objs[it.cur:end] - it.cur = end - return ret, nil -} - -func (it *sliceKeySpaceIterator) Rewind() { - it.cur = 0 -} - -type containerSrc struct { - get func(ctx context.Context, id cid.ID) (*container.Container, error) - deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error) -} - -func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) { - return f.get(ctx, id) -} - -func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return f.deletionInfo(ctx, id) -} - -// placementBuilderFunc is a placement.Builder backed by a function -type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) - -func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { - return f(c, o, p) -} - -// announcedKeysFunc is a netmap.AnnouncedKeys backed by a function. -type announcedKeysFunc func([]byte) bool - -func (f announcedKeysFunc) IsLocalKey(k []byte) bool { return f(k) } - -type testReplicator struct { - handleReplicationTask func(ctx context.Context, task replicator.Task, res replicator.TaskResult) - handleLocalPutTask func(ctx context.Context, task replicator.Task) - handlePullTask func(ctx context.Context, task replicator.Task) -} - -func (r *testReplicator) HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult) { - r.handleReplicationTask(ctx, task, res) -} - -func (r *testReplicator) HandleLocalPutTask(ctx context.Context, task replicator.Task) { - r.handleLocalPutTask(ctx, task) -} - -func (r *testReplicator) HandlePullTask(ctx context.Context, task replicator.Task) { - r.handlePullTask(ctx, task) -} diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go deleted file mode 100644 index 635a5683b..000000000 --- a/pkg/services/policer/process.go +++ /dev/null @@ -1,113 +0,0 @@ -package policer - -import ( - "context" - "errors" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" -) - -func (p *Policer) Run(ctx context.Context) { - p.shardPolicyWorker(ctx) - p.log.Info(ctx, logs.PolicerRoutineStopped) -} - -func (p *Policer) shardPolicyWorker(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String()) - for { - select { - case <-ctx.Done(): - p.taskPool.Release() - return - default: - } - - addrs, err := p.keySpaceIterator.Next(ctx, p.batchSize) - if err != nil { - if errors.Is(err, engine.ErrEndOfListing) { - p.keySpaceIterator.Rewind() - time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit - continue - } - p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) - } - - skipMap := newSkipMap() - for i := range addrs { - select { - case <-ctx.Done(): - p.taskPool.Release() - return - default: - addr := addrs[i] - if p.objsInWork.inWork(addr.Address) { - // do not 
process an object - // that is in work - continue - } - - err := p.taskPool.Submit(func() { - v, ok := p.cache.Get(addr.Address) - if ok && time.Since(v) < p.evictDuration { - return - } - - if p.objsInWork.add(addr.Address) { - err := p.processObject(ctx, addr) - if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) { - p.log.Error(ctx, logs.PolicerUnableToProcessObj, - zap.Stringer("object", addr.Address), - zap.Error(err)) - } - p.cache.Add(addr.Address, time.Now()) - p.objsInWork.remove(addr.Address) - p.metrics.IncProcessedObjects() - } - }) - if err != nil { - p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err)) - } - } - } - } -} - -type errMap struct { - sync.Mutex - skipMap map[cid.ID][]error -} - -func newSkipMap() *errMap { - return &errMap{ - skipMap: make(map[cid.ID][]error), - } -} - -// addSeenError marks err as a seen error for the container. -// Returns true if the error has already been added. -func (m *errMap) addSeenError(cnr cid.ID, err error) bool { - m.Lock() - defer m.Unlock() - - for _, e := range m.skipMap[cnr] { - if errors.Is(err, e) { - return true - } - } - - // Restrict list length to avoid possible OOM if some random error is added in future. - const maxErrListLength = 10 - - lst := m.skipMap[cnr] - if len(lst) < maxErrListLength { - m.skipMap[cnr] = append(lst, err) - } - return false -} diff --git a/pkg/services/replicator/metrics.go b/pkg/services/replicator/metrics.go deleted file mode 100644 index 3fc062926..000000000 --- a/pkg/services/replicator/metrics.go +++ /dev/null @@ -1,8 +0,0 @@ -package replicator - -type MetricsRegister interface { - IncInFlightRequest() - DecInFlightRequest() - IncProcessedObjects() - AddPayloadSize(size int64) -} diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go deleted file mode 100644 index 8c6f0df06..000000000 --- a/pkg/services/replicator/process.go +++ /dev/null @@ -1,89 +0,0 @@ -package replicator - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -// TaskResult is a replication result interface. -type TaskResult interface { - // SubmitSuccessfulReplication submits the successful object replication - // to the given node. - SubmitSuccessfulReplication(netmap.NodeInfo) -} - -// HandleReplicationTask executes a replication task inside the invoking goroutine. -// Passes all the nodes that accepted the replication to the TaskResult.
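// A usage sketch (identifiers assumed for illustration, not taken from the
// callers): the Policer builds a Task and passes its nodeCache, which
// implements TaskResult, as the result collector:
//
//	task := replicator.Task{NumCopies: 1, Addr: addr, Nodes: candidates}
//	p.HandleReplicationTask(ctx, task, cache)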
-func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res TaskResult) { - p.metrics.IncInFlightRequest() - defer p.metrics.DecInFlightRequest() - defer func() { - p.log.Debug(ctx, logs.ReplicatorFinishWork, - zap.Uint32("amount of unfinished replicas", task.NumCopies), - ) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleReplicateTask", - trace.WithAttributes( - attribute.Stringer("address", task.Addr), - attribute.Int64("number_of_copies", int64(task.NumCopies)), - )) - defer span.End() - - if task.Obj == nil { - var err error - task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr) - if err != nil { - p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage, - zap.Stringer("object", task.Addr), - zap.Error(err)) - - return - } - } - - prm := new(objectwriter.RemotePutPrm). - WithObject(task.Obj) - - for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ { - select { - case <-ctx.Done(): - return - default: - } - - log := p.log.With( - zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])), - zap.Stringer("object", task.Addr), - ) - - callCtx, cancel := context.WithTimeout(ctx, p.putTimeout) - - err := p.remoteSender.PutObject(callCtx, prm.WithNodeInfo(task.Nodes[i])) - - cancel() - - if err != nil { - log.Error(ctx, logs.ReplicatorCouldNotReplicateObject, - zap.Error(err), - ) - } else { - log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated) - - task.NumCopies-- - - res.SubmitSuccessfulReplication(task.Nodes[i]) - - p.metrics.IncProcessedObjects() - p.metrics.AddPayloadSize(int64(task.Obj.PayloadSize())) - } - } -} diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go deleted file mode 100644 index 216fe4919..000000000 --- a/pkg/services/replicator/pull.go +++ /dev/null @@ -1,66 +0,0 @@ -package replicator - -import ( - "context" - "errors" - "slices" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var errFailedToGetObjectFromAnyNode = errors.New("failed to get object from any node") - -func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { - p.metrics.IncInFlightRequest() - defer p.metrics.DecInFlightRequest() - defer func() { - p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull")) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask", - trace.WithAttributes( - attribute.Stringer("address", task.Addr), - attribute.Int("nodes_count", len(task.Nodes)), - )) - defer span.End() - - var obj *objectSDK.Object - - for _, node := range task.Nodes { - var err error - obj, err = p.remoteGetter.Get(ctx, getsvc.RemoteGetPrm{ - Address: task.Addr, - Node: node, - }) - if err == nil { - break - } - endpoints := slices.Collect(node.NetworkEndpoints()) - p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, - zap.Stringer("object", task.Addr), - zap.Error(err), - zap.Strings("endpoints", endpoints)) - } - - if obj == nil { - p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, - zap.Stringer("object", task.Addr), - 
zap.Error(errFailedToGetObjectFromAnyNode)) - return - } - - err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container)) - if err != nil { - p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, - zap.Stringer("object", task.Addr), - zap.Error(err)) - } -} diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go deleted file mode 100644 index bcad8471d..000000000 --- a/pkg/services/replicator/put.go +++ /dev/null @@ -1,45 +0,0 @@ -package replicator - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - -var errObjectNotDefined = errors.New("object is not defined") - -func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { - p.metrics.IncInFlightRequest() - defer p.metrics.DecInFlightRequest() - defer func() { - p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "local put")) - }() - - ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask", - trace.WithAttributes( - attribute.Stringer("address", task.Addr), - attribute.Int("nodes_count", len(task.Nodes)), - )) - defer span.End() - - if task.Obj == nil { - p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, - zap.Stringer("object", task.Addr), - zap.Error(errObjectNotDefined)) - return - } - - err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container)) - if err != nil { - p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, - zap.Stringer("object", task.Addr), - zap.Error(err)) - } -} diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go deleted file mode 100644 index a940cef37..000000000 --- a/pkg/services/replicator/replicator.go +++ /dev/null @@ -1,90 +0,0 @@ -package replicator - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// Replicator represents the utility that replicates -// local objects to remote nodes. -type Replicator struct { - *cfg -} - -// Option is an option for the Replicator constructor. -type Option func(*cfg) - -type cfg struct { - putTimeout time.Duration - - log *logger.Logger - - remoteSender *objectwriter.RemoteSender - - remoteGetter *getsvc.RemoteGetter - - localStorage *engine.StorageEngine - - metrics MetricsRegister -} - -func defaultCfg() *cfg { - return &cfg{} -} - -// New creates, initializes and returns a Replicator instance. -func New(opts ...Option) *Replicator { - c := defaultCfg() - - for i := range opts { - opts[i](c) - } - - return &Replicator{ - cfg: c, - } -} - -// WithPutTimeout returns option to set Put timeout of Replicator. -func WithPutTimeout(v time.Duration) Option { - return func(c *cfg) { - c.putTimeout = v - } -} - -// WithLogger returns option to set Logger of Replicator. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} - -// WithRemoteSender returns option to set remote object sender of Replicator.
-func WithRemoteSender(v *objectwriter.RemoteSender) Option { - return func(c *cfg) { - c.remoteSender = v - } -} - -func WithRemoteGetter(v *getsvc.RemoteGetter) Option { - return func(c *cfg) { - c.remoteGetter = v - } -} - -// WithLocalStorage returns option to set local object storage of Replicator. -func WithLocalStorage(v *engine.StorageEngine) Option { - return func(c *cfg) { - c.localStorage = v - } -} - -func WithMetrics(v MetricsRegister) Option { - return func(c *cfg) { - c.metrics = v - } -} diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go deleted file mode 100644 index a03f8dcaa..000000000 --- a/pkg/services/replicator/task.go +++ /dev/null @@ -1,22 +0,0 @@ -package replicator - -import ( - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Task represents group of Replicator task parameters. -type Task struct { - // NumCopies is the number of copies to replicate. - NumCopies uint32 - // Addr is the address of the local object. - Addr oid.Address - // Obj is the object to avoid fetching it from the local storage. - Obj *objectSDK.Object - // Nodes is a list of potential object holders. - Nodes []netmap.NodeInfo - - Container containerSDK.Container -} diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go deleted file mode 100644 index f0591de71..000000000 --- a/pkg/services/session/executor.go +++ /dev/null @@ -1,48 +0,0 @@ -package session - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "go.uber.org/zap" -) - -type ServiceExecutor interface { - Create(context.Context, *session.CreateRequestBody) (*session.CreateResponseBody, error) -} - -type executorSvc struct { - exec ServiceExecutor - - respSvc *response.Service - - log *logger.Logger -} - -// NewExecutionService wraps ServiceExecutor and returns Session Service interface. -func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *logger.Logger) Server { - return &executorSvc{ - exec: exec, - log: l, - respSvc: respSvc, - } -} - -func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) - - respBody, err := s.exec.Create(ctx, req.GetBody()) - if err != nil { - return nil, fmt.Errorf("could not execute Create request: %w", err) - } - - resp := new(session.CreateResponse) - resp.SetBody(respBody) - - s.respSvc.SetMeta(resp) - return resp, nil -} diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go deleted file mode 100644 index e8555a7c9..000000000 --- a/pkg/services/session/server.go +++ /dev/null @@ -1,12 +0,0 @@ -package session - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" -) - -// Server is an interface of the FrostFS API Session service server. 
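// A minimal wiring sketch (variable names assumed): the pieces above compose
// as a signing layer over the response-meta layer over an executor, where the
// executor is one of the TokenStore implementations below:
//
//	srv := NewSignService(&nodeKey.PrivateKey, NewExecutionService(tokenStore, respSvc, log))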
-type Server interface { - Create(context.Context, *session.CreateRequest) (*session.CreateResponse, error) -} diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go deleted file mode 100644 index 3664c1403..000000000 --- a/pkg/services/session/sign.go +++ /dev/null @@ -1,31 +0,0 @@ -package session - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" -) - -type signService struct { - sigSvc *util.SignService - - svc Server -} - -func NewSignService(key *ecdsa.PrivateKey, svc Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *signService) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - if err := s.sigSvc.VerifyRequest(req); err != nil { - resp := new(session.CreateResponse) - return resp, s.sigSvc.SignResponse(resp, err) - } - resp, err := util.EnsureNonNilResponse(s.svc.Create(ctx, req)) - return resp, s.sigSvc.SignResponse(resp, err) -} diff --git a/pkg/services/session/storage/persistent/encryption.go b/pkg/services/session/storage/persistent/encryption.go deleted file mode 100644 index f505fbbe4..000000000 --- a/pkg/services/session/storage/persistent/encryption.go +++ /dev/null @@ -1,32 +0,0 @@ -package persistent - -import ( - "crypto/rand" - "fmt" - "io" -) - -func (s *TokenStore) encrypt(value []byte) ([]byte, error) { - nonce := make([]byte, s.gcm.NonceSize()) - - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, fmt.Errorf("could not init random nonce: %w", err) - } - - return s.gcm.Seal(nonce, nonce, value, nil), nil -} - -func (s *TokenStore) decrypt(value []byte) ([]byte, error) { - nonceSize := s.gcm.NonceSize() - if len(value) < nonceSize { - return nil, fmt.Errorf( - "unexpected encrypted length: nonce length is %d, encrypted data length is %d", - nonceSize, - len(value), - ) - } - - nonce, encryptedData := value[:nonceSize], value[nonceSize:] - - return s.gcm.Open(nil, nonce, encryptedData, nil) -} diff --git a/pkg/services/session/storage/persistent/encryption_test.go b/pkg/services/session/storage/persistent/encryption_test.go deleted file mode 100644 index 642fa4007..000000000 --- a/pkg/services/session/storage/persistent/encryption_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package persistent - -import ( - "bytes" - "path/filepath" - "testing" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestTokenStore_Encryption(t *testing.T) { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - ts, err := NewTokenStore(filepath.Join(t.TempDir(), ".storage"), WithEncryptionKey(&pk.PrivateKey)) - require.NoError(t, err) - - data := []byte("nice encryption, awesome tests") - - encryptedData, err := ts.encrypt(data) - require.NoError(t, err) - require.False(t, bytes.Equal(data, encryptedData)) - - decryptedData, err := ts.decrypt(encryptedData) - require.NoError(t, err) - - require.Equal(t, data, decryptedData) -} diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go deleted file mode 100644 index ea0233f9a..000000000 --- a/pkg/services/session/storage/persistent/executor.go +++ /dev/null @@ -1,73 +0,0 @@ -package persistent - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.etcd.io/bbolt" -) - -// Create inits a new private session token using information -// from corresponding request, saves it to bolt database (and -// encrypts private keys if storage has been configured so). -// Returns response that is filled with just created token's -// ID and public key for it. -func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { - idV2 := body.GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing owner") - } - - var id user.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid owner: %w", err) - } - - uidBytes, err := storage.NewTokenID() - if err != nil { - return nil, fmt.Errorf("could not generate token ID: %w", err) - } - - sk, err := keys.NewPrivateKey() - if err != nil { - return nil, err - } - - value, err := s.packToken(body.GetExpiration(), &sk.PrivateKey) - if err != nil { - return nil, err - } - - err = s.db.Update(func(tx *bbolt.Tx) error { - rootBucket := tx.Bucket(sessionsBucket) - - ownerBucket, err := rootBucket.CreateBucketIfNotExists(id.WalletBytes()) - if err != nil { - return fmt.Errorf( - "could not get/create %s owner bucket: %w", id, err) - } - - err = ownerBucket.Put(uidBytes, value) - if err != nil { - return fmt.Errorf("could not put session token for %s oid: %w", id, err) - } - - return nil - }) - if err != nil { - return nil, fmt.Errorf("could not save token to persistent storage: %w", err) - } - - res := new(session.CreateResponseBody) - res.SetID(uidBytes) - res.SetSessionKey(sk.PublicKey().Bytes()) - - return res, nil -} diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go deleted file mode 100644 index f80ecb591..000000000 --- a/pkg/services/session/storage/persistent/executor_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package persistent - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" -) - -func TestTokenStore(t *testing.T) { - ts, err := NewTokenStore(filepath.Join(t.TempDir(), ".storage")) - require.NoError(t, err) - - defer ts.Close() - - owner := usertest.ID() - - var ownerV2 refs.OwnerID - owner.WriteToV2(&ownerV2) - - req := new(session.CreateRequestBody) - req.SetOwnerID(&ownerV2) - - const tokenNumber = 5 - - type tok struct { - id []byte - key []byte - } - - tokens := make([]tok, 0, tokenNumber) - - for i := range tokenNumber { - req.SetExpiration(uint64(i)) - - res, err := ts.Create(context.Background(), req) - require.NoError(t, err) - - tokens = append(tokens, tok{ - id: res.GetID(), - key: res.GetSessionKey(), - }) - } - - for i, token := range tokens { - savedToken := ts.Get(owner, token.id) - - require.Equal(t, uint64(i), savedToken.ExpiredAt()) - - equalKeys(t, token.key, savedToken.SessionKey()) - } -} - -func TestTokenStore_Persistent(t *testing.T) { - path := filepath.Join(t.TempDir(), ".storage") - - ts, err := NewTokenStore(path) - require.NoError(t, err) - - idOwner := usertest.ID() - - var idOwnerV2 refs.OwnerID - 
idOwner.WriteToV2(&idOwnerV2) - - const exp = 12345 - - req := new(session.CreateRequestBody) - req.SetOwnerID(&idOwnerV2) - req.SetExpiration(exp) - - res, err := ts.Create(context.Background(), req) - require.NoError(t, err) - - id := res.GetID() - pubKey := res.GetSessionKey() - - // close db (stop the node) - require.NoError(t, ts.Close()) - - // open persistent storage again - ts, err = NewTokenStore(path) - require.NoError(t, err) - - defer ts.Close() - - savedToken := ts.Get(idOwner, id) - - equalKeys(t, pubKey, savedToken.SessionKey()) -} - -func TestTokenStore_RemoveOld(t *testing.T) { - tests := []*struct { - epoch uint64 - id, key []byte - }{ - { - epoch: 1, - }, - { - epoch: 2, - }, - { - epoch: 3, - }, - { - epoch: 4, - }, - { - epoch: 5, - }, - { - epoch: 6, - }, - } - - ts, err := NewTokenStore(filepath.Join(t.TempDir(), ".storage")) - require.NoError(t, err) - - defer ts.Close() - - owner := usertest.ID() - - var ownerV2 refs.OwnerID - owner.WriteToV2(&ownerV2) - - req := new(session.CreateRequestBody) - req.SetOwnerID(&ownerV2) - - for _, test := range tests { - req.SetExpiration(test.epoch) - - res, err := ts.Create(context.Background(), req) - require.NoError(t, err) - - test.id = res.GetID() - test.key = res.GetSessionKey() - } - - const currEpoch = 3 - - ts.RemoveOld(currEpoch) - - for _, test := range tests { - token := ts.Get(owner, test.id) - - if test.epoch <= currEpoch { - require.Nil(t, token) - } else { - equalKeys(t, test.key, token.SessionKey()) - } - } -} - -// This test was added to fix bolt's behaviour since the persistent -// storage uses cursor and there is an issue about `cursor.Delete` -// method: https://github.com/etcd-io/bbolt/issues/146. -// -// If this test is passing, TokenStore works correctly. -func TestBolt_Cursor(t *testing.T) { - db, err := bbolt.Open(filepath.Join(t.TempDir(), ".storage"), 0o666, nil) - require.NoError(t, err) - - defer db.Close() - - cursorKeys := make(map[string]struct{}) - - bucketName := []byte("bucket") - - err = db.Update(func(tx *bbolt.Tx) (err error) { - b, err := tx.CreateBucket(bucketName) - if err != nil { - return err - } - - put := func(s []byte) { - if err == nil { - err = b.Put(s, s) - } - } - - put([]byte("1")) - put([]byte("2")) - put([]byte("3")) - put([]byte("4")) - - return - }) - - err = db.Update(func(tx *bbolt.Tx) error { - b := tx.Bucket(bucketName) - c := b.Cursor() - - for k, _ := c.First(); k != nil; k, _ = c.Next() { - // fill key that was viewed - cursorKeys[string(k)] = struct{}{} - - if bytes.Equal(k, []byte("1")) { - // delete the first one - err = c.Delete() - if err != nil { - return err - } - } - } - - return nil - }) - require.NoError(t, err) - - _, ok := cursorKeys["2"] - if !ok { - t.Fatal("unexpectedly skipped '2' value") - } -} - -func equalKeys(t *testing.T, sessionKey []byte, savedPrivateKey *ecdsa.PrivateKey) { - returnedPubKey, err := keys.NewPublicKeyFromBytes(sessionKey, elliptic.P256()) - require.NoError(t, err) - - savedPubKey := (keys.PublicKey)(savedPrivateKey.PublicKey) - - require.Equal(t, true, returnedPubKey.Equal(&savedPubKey)) -} diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go deleted file mode 100644 index 60db97f90..000000000 --- a/pkg/services/session/storage/persistent/options.go +++ /dev/null @@ -1,49 +0,0 @@ -package persistent - -import ( - "crypto/ecdsa" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -type cfg struct { - l *logger.Logger - 
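// timeout below is handed to bbolt.Options.Timeout when the database file is
// opened; defaultCfg sets it to 100ms so that opening a file locked by
// another process fails fast instead of blocking indefinitely.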
timeout time.Duration - privateKey *ecdsa.PrivateKey -} - -// Option allows setting optional parameters of the TokenStore. -type Option func(*cfg) - -func defaultCfg() *cfg { - return &cfg{ - l: logger.NewLoggerWrapper(zap.L()), - timeout: 100 * time.Millisecond, - } -} - -// WithLogger returns an option to specify -// logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.l = v - } -} - -// WithTimeout returns option to specify -// database connection timeout. -func WithTimeout(v time.Duration) Option { - return func(c *cfg) { - c.timeout = v - } -} - -// WithEncryptionKey returns an option to encrypt private -// session keys using the provided private key. -func WithEncryptionKey(k *ecdsa.PrivateKey) Option { - return func(c *cfg) { - c.privateKey = k - } -} diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go deleted file mode 100644 index 132d62445..000000000 --- a/pkg/services/session/storage/persistent/storage.go +++ /dev/null @@ -1,154 +0,0 @@ -package persistent - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "encoding/hex" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -// TokenStore is a wrapper around persistent K:V db that -// allows creating (storing), retrieving and expiring -// (removing) session tokens. -type TokenStore struct { - db *bbolt.DB - - l *logger.Logger - - // optional AES-256 encryption - // in Galois/Counter Mode (GCM) - gcm cipher.AEAD -} - -var sessionsBucket = []byte("sessions") - -// NewTokenStore creates, initializes and returns a new TokenStore instance. -// -// The elements of the instance are stored in bolt DB. -func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { - cfg := defaultCfg() - - for _, o := range opts { - o(cfg) - } - - db, err := bbolt.Open(path, 0o600, - &bbolt.Options{ - Timeout: cfg.timeout, - }) - if err != nil { - return nil, fmt.Errorf("can't open bbolt at %s: %w", path, err) - } - - err = db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(sessionsBucket) - return err - }) - if err != nil { - _ = db.Close() - - return nil, fmt.Errorf("could not init session bucket: %w", err) - } - - ts := &TokenStore{db: db, l: cfg.l} - - // enable encryption if it - // was configured - if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) - cfg.privateKey.D.FillBytes(rawKey) - - c, err := aes.NewCipher(rawKey) - if err != nil { - return nil, fmt.Errorf("could not create cipher block: %w", err) - } - - gcm, err := cipher.NewGCM(c) - if err != nil { - return nil, fmt.Errorf("could not wrap cipher block in Galois/Counter Mode: %w", err) - } - - ts.gcm = gcm - } - - return ts, nil -} - -// Get returns the private token corresponding to the given identifiers. -// -// Returns nil if there is no element in storage.
-func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateToken) { - err := s.db.View(func(tx *bbolt.Tx) error { - rootBucket := tx.Bucket(sessionsBucket) - - ownerBucket := rootBucket.Bucket(ownerID.WalletBytes()) - if ownerBucket == nil { - return nil - } - - rawToken := ownerBucket.Get(tokenID) - if rawToken == nil { - return nil - } - - var err error - - t, err = s.unpackToken(rawToken) - return err - }) - if err != nil { - s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage, - zap.Error(err), - zap.Stringer("ownerID", ownerID), - zap.String("tokenID", hex.EncodeToString(tokenID)), - ) - } - - return -} - -// RemoveOld removes all tokens expired since provided epoch. -func (s *TokenStore) RemoveOld(epoch uint64) { - err := s.db.Update(func(tx *bbolt.Tx) error { - rootBucket := tx.Bucket(sessionsBucket) - - // iterating over ownerIDs - return iterateNestedBuckets(rootBucket, func(b *bbolt.Bucket) error { - c := b.Cursor() - var err error - - // iterating over fixed ownerID's tokens - for k, v := c.First(); k != nil; k, v = c.Next() { - if epochFromToken(v) <= epoch { - err = c.Delete() - if err != nil { - s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken, - zap.String("token_id", hex.EncodeToString(k)), - ) - } - } - } - - return nil - }) - }) - if err != nil { - s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens, - zap.Uint64("epoch", epoch), - ) - } -} - -// Close closes database connection. -func (s *TokenStore) Close() error { - return s.db.Close() -} diff --git a/pkg/services/session/storage/persistent/util.go b/pkg/services/session/storage/persistent/util.go deleted file mode 100644 index dff090078..000000000 --- a/pkg/services/session/storage/persistent/util.go +++ /dev/null @@ -1,76 +0,0 @@ -package persistent - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/binary" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - "go.etcd.io/bbolt" -) - -const keyOffset = 8 - -func (s *TokenStore) packToken(exp uint64, key *ecdsa.PrivateKey) ([]byte, error) { - rawKey, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, fmt.Errorf("could not marshal private key: %w", err) - } - - if s.gcm != nil { - rawKey, err = s.encrypt(rawKey) - if err != nil { - return nil, fmt.Errorf("could not encrypt session key: %w", err) - } - } - - res := make([]byte, keyOffset, keyOffset+len(rawKey)) - binary.LittleEndian.PutUint64(res, exp) - - res = append(res, rawKey...) 
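// Packed-token layout recap: an 8-byte little-endian expiration epoch
// (keyOffset) followed by the DER-encoded private key, GCM-sealed when
// encryption is configured; epochFromToken and unpackToken read the same
// layout back.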
- - return res, nil -} - -func (s *TokenStore) unpackToken(raw []byte) (*storage.PrivateToken, error) { - var err error - - epoch := epochFromToken(raw) - rawKey := raw[keyOffset:] - - if s.gcm != nil { - rawKey, err = s.decrypt(rawKey) - if err != nil { - return nil, fmt.Errorf("could not decrypt session key: %w", err) - } - } - - key, err := x509.ParseECPrivateKey(rawKey) - if err != nil { - return nil, fmt.Errorf("could not unmarshal private key: %w", err) - } - - return storage.NewPrivateToken(key, epoch), nil -} - -func epochFromToken(rawToken []byte) uint64 { - return binary.LittleEndian.Uint64(rawToken) -} - -func iterateNestedBuckets(b *bbolt.Bucket, fn func(b *bbolt.Bucket) error) error { - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - // nil value is a hallmark - // of the nested buckets - if v == nil { - err := fn(b.Bucket(k)) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/services/session/storage/persistent/util_test.go b/pkg/services/session/storage/persistent/util_test.go deleted file mode 100644 index 0cb81e7f9..000000000 --- a/pkg/services/session/storage/persistent/util_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package persistent - -import ( - "testing" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestPack(t *testing.T) { - key, err := keys.NewPrivateKey() - require.NoError(t, err) - - ts := new(TokenStore) - - const exp = 12345 - - raw, err := ts.packToken(exp, &key.PrivateKey) - require.NoError(t, err) - - require.Equal(t, uint64(exp), epochFromToken(raw)) - - unpacked, err := ts.unpackToken(raw) - require.NoError(t, err) - - require.Equal(t, uint64(exp), unpacked.ExpiredAt()) - require.Equal(t, true, key.Equal(unpacked.SessionKey())) -} diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go deleted file mode 100644 index 423e579d7..000000000 --- a/pkg/services/session/storage/temporary/executor.go +++ /dev/null @@ -1,54 +0,0 @@ -package temporary - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/mr-tron/base58" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { - idV2 := body.GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing owner") - } - - var id user.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid owner: %w", err) - } - - uidBytes, err := storage.NewTokenID() - if err != nil { - return nil, fmt.Errorf("could not generate token ID: %w", err) - } - - sk, err := keys.NewPrivateKey() - if err != nil { - return nil, err - } - - s.mtx.Lock() - s.tokens[key{ - tokenID: base58.Encode(uidBytes), - ownerID: id.EncodeToString(), - }] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration()) - s.mtx.Unlock() - - res := new(session.CreateResponseBody) - res.SetID(uidBytes) - res.SetSessionKey(sk.PublicKey().Bytes()) - - return res, nil -} - -func (s *TokenStore) Close() error { - return nil -} diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go deleted file mode 100644 index c9da6b842..000000000 --- a/pkg/services/session/storage/temporary/storage.go +++ /dev/null @@ -1,61 
+0,0 @@ -package temporary - -import ( - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/mr-tron/base58" -) - -type key struct { - // nolint:unused - tokenID string - // nolint:unused - ownerID string -} - -// TokenStore is an in-memory session token store. -// It allows creating (storing), retrieving and -// expiring (removing) session tokens. -// Must be created only via calling NewTokenStore. -type TokenStore struct { - mtx sync.RWMutex - - tokens map[key]*storage.PrivateToken -} - -// NewTokenStore creates, initializes and returns a new TokenStore instance. -// -// The elements of the instance are stored in the map. -func NewTokenStore() *TokenStore { - return &TokenStore{ - tokens: make(map[key]*storage.PrivateToken), - } -} - -// Get returns private token corresponding to the given identifiers. -// -// Returns nil is there is no element in storage. -func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken { - s.mtx.RLock() - t := s.tokens[key{ - tokenID: base58.Encode(tokenID), - ownerID: ownerID.EncodeToString(), - }] - s.mtx.RUnlock() - - return t -} - -// RemoveOld removes all tokens expired since provided epoch. -func (s *TokenStore) RemoveOld(epoch uint64) { - s.mtx.Lock() - defer s.mtx.Unlock() - - for k, tok := range s.tokens { - if tok.ExpiredAt() <= epoch { - delete(s.tokens, k) - } - } -} diff --git a/pkg/services/session/storage/types.go b/pkg/services/session/storage/types.go deleted file mode 100644 index 74fd88699..000000000 --- a/pkg/services/session/storage/types.go +++ /dev/null @@ -1,31 +0,0 @@ -package storage - -import ( - "crypto/ecdsa" -) - -// PrivateToken represents private session info. -type PrivateToken struct { - sessionKey *ecdsa.PrivateKey - - exp uint64 -} - -// NewPrivateToken returns new private token based on the -// passed values. -func NewPrivateToken(sk *ecdsa.PrivateKey, exp uint64) *PrivateToken { - return &PrivateToken{ - sessionKey: sk, - exp: exp, - } -} - -// SessionKey returns the private session key. -func (t *PrivateToken) SessionKey() *ecdsa.PrivateKey { - return t.sessionKey -} - -// ExpiredAt returns epoch number until token is valid. -func (t *PrivateToken) ExpiredAt() uint64 { - return t.exp -} diff --git a/pkg/services/session/storage/util.go b/pkg/services/session/storage/util.go deleted file mode 100644 index 8892276a1..000000000 --- a/pkg/services/session/storage/util.go +++ /dev/null @@ -1,23 +0,0 @@ -package storage - -import ( - "fmt" - - "github.com/google/uuid" -) - -// NewTokenID generates new ID for a token -// based on UUID. 
-func NewTokenID() ([]byte, error) { - uid, err := uuid.NewRandom() - if err != nil { - return nil, fmt.Errorf("could not generate UUID: %w", err) - } - - uidBytes, err := uid.MarshalBinary() - if err != nil { - return nil, fmt.Errorf("could not marshal marshal UUID: %w", err) - } - - return uidBytes, nil -} diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go deleted file mode 100644 index 58757ff6d..000000000 --- a/pkg/services/tree/ape.go +++ /dev/null @@ -1,104 +0,0 @@ -package tree - -import ( - "context" - "encoding/hex" - "fmt" - "net" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter" - aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/peer" -) - -func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, -) (aperequest.Request, error) { - schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) - if err != nil { - return aperequest.Request{}, err - } - schemaRole, err := converter.SchemaRoleFromACLRole(role) - if err != nil { - return aperequest.Request{}, err - } - reqProps := map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()), - nativeschema.PropertyKeyActorRole: schemaRole, - } - reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey) - if err != nil { - return aperequest.Request{}, err - } - if p, ok := peer.FromContext(ctx); ok { - if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { - reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() - } - } - - var resourceName string - if namespace == "root" || namespace == "" { - resourceName = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString()) - } else { - resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) - } - - resProps := map[string]string{ - nativeschema.ProperyKeyTreeID: treeID, - } - - return aperequest.NewRequest( - schemaMethod, - aperequest.NewResource(resourceName, resProps), - reqProps, - ), nil -} - -func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, -) error { - namespace := "" - cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") - if hasNamespace { - namespace = cntNamespace - } - - request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) - if err != nil { - return fmt.Errorf("failed to create ape request: %w", err) - } - - return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{ - Request: request, - Namespace: namespace, - Container: cid, - ContainerOwner: container.Value.Owner(), - PublicKey: publicKey, - BearerToken: bt, - }) -} - -// fillWithUserClaimTags 
fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { - if reqProps == nil { - reqProps = make(map[string]string) - } - props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey) - if err != nil { - return reqProps, err - } - for propertyName, properyValue := range props { - reqProps[propertyName] = properyValue - } - return reqProps, nil -} diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go deleted file mode 100644 index 7b209fd47..000000000 --- a/pkg/services/tree/ape_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package tree - -import ( - "context" - "encoding/hex" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -var ( - containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy" - - senderPrivateKey, _ = keys.NewPrivateKey() - - senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes()) - - rootCnr = &core.Container{Value: containerSDK.Container{}} -) - -type frostfsIDProviderMock struct { - subjects map[util.Uint160]*client.Subject - subjectsExtended map[util.Uint160]*client.SubjectExtended -} - -func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { - v, ok := f.subjects[key] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return v, nil -} - -func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { - v, ok := f.subjectsExtended[key] - if !ok { - return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) - } - return v, nil -} - -var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil) - -func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock { - return &frostfsIDProviderMock{ - subjects: map[util.Uint160]*client.Subject{ - scriptHashFromSenderKey(t, senderKey): { - Namespace: "testnamespace", - Name: "test", - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExtended: map[util.Uint160]*client.SubjectExtended{ - scriptHashFromSenderKey(t, senderKey): { - Namespace: "testnamespace", - Name: "test", - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 1, - Name: "test", - Namespace: "testnamespace", - KV: map[string]string{ - "attr1": "value1", - "attr2": "value2", - }, - }, - }, - }, - }, - } -} - -func scriptHashFromSenderKey(t *testing.T, senderKey string) 
util.Uint160 { - pk, err := keys.NewPublicKeyFromString(senderKey) - require.NoError(t, err) - return pk.GetScriptHash() -} - -type stMock struct{} - -func (m *stMock) CurrentEpoch() uint64 { - return 8 -} - -func TestCheckAPE(t *testing.T) { - cid := cid.ID{} - _ = cid.DecodeString(containerID) - - t.Run("treeID rule", func(t *testing.T) { - los := inmemory.NewInmemoryLocalStorage() - mcs := inmemory.NewInmemoryMorphRuleChainStorage() - fid := newFrostfsIDProviderMock(t) - s := Service{ - cfg: cfg{ - frostfsidSubjectProvider: fid, - }, - apeChecker: checkercore.New(los, mcs, fid, &stMock{}), - } - - mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.QuotaLimitReached, - Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindResource, - Key: nativeschema.ProperyKeyTreeID, - Value: versionTreeID, - }, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) - - var chErr *checkercore.ChainRouterError - require.ErrorAs(t, err, &chErr) - require.Equal(t, chain.QuotaLimitReached, chErr.Status()) - }) - - t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { - los := inmemory.NewInmemoryLocalStorage() - mcs := inmemory.NewInmemoryMorphRuleChainStorage() - fid := newFrostfsIDProviderMock(t) - s := Service{ - cfg: cfg{ - frostfsidSubjectProvider: fid, - }, - apeChecker: checkercore.New(los, mcs, fid, &stMock{}), - } - - los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Condition: []chain.Condition{ - { - Op: chain.CondStringNotEquals, - Kind: chain.KindResource, - Key: nativeschema.PropertyKeyObjectType, - Value: "TOMBSTONE", - }, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) - require.NoError(t, err) - }) - - t.Run("delete rule won't affect tree add", func(t *testing.T) { - los := inmemory.NewInmemoryLocalStorage() - mcs := inmemory.NewInmemoryMorphRuleChainStorage() - fid := newFrostfsIDProviderMock(t) - s := Service{ - cfg: cfg{ - frostfsidSubjectProvider: fid, - }, - apeChecker: checkercore.New(los, mcs, fid, &stMock{}), - } - - los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, - Resources: chain.Resources{ - Names: 
[]string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.Allow, - Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Condition: []chain.Condition{ - { - Op: chain.CondStringNotEquals, - Kind: chain.KindResource, - Key: nativeschema.PropertyKeyObjectType, - Value: "TOMBSTONE", - }, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) - require.NoError(t, err) - }) -} diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go deleted file mode 100644 index a11700771..000000000 --- a/pkg/services/tree/cache.go +++ /dev/null @@ -1,90 +0,0 @@ -package tree - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "sync" - "time" - - internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/hashicorp/golang-lru/v2/simplelru" - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" -) - -type clientCache struct { - sync.Mutex - simplelru.LRU[string, cacheItem] - key *ecdsa.PrivateKey - ds *internalNet.DialerSource -} - -type cacheItem struct { - cc *grpc.ClientConn - lastTry time.Time -} - -const ( - defaultClientCacheSize = 32 - defaultClientConnectTimeout = time.Second * 2 - defaultReconnectInterval = time.Second * 15 -) - -var errRecentlyFailed = errors.New("client has recently failed") - -func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { - l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) { - if conn := value.cc; conn != nil { - _ = conn.Close() - } - }) - c.LRU = *l - c.key = pk - c.ds = ds -} - -func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { - c.Lock() - ccInt, ok := c.Get(netmapAddr) - c.Unlock() - - if ok { - item := ccInt - if item.cc == nil { - if d := time.Since(item.lastTry); d < defaultReconnectInterval { - return nil, fmt.Errorf("%w: %s till the next reconnection to %s", - errRecentlyFailed, d, netmapAddr) - } - } else { - if s := item.cc.GetState(); s == connectivity.Idle || s == connectivity.Ready { - return NewTreeServiceClient(item.cc), nil - } - _ = item.cc.Close() - } - } - - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) - lastTry := time.Now() - - c.Lock() - if err != nil { - c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) - } else { - c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) - } - c.Unlock() - - if err != nil { - return nil, err - } - - return NewTreeServiceClient(cc), nil -} diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go deleted file mode 100644 index c641a21a2..000000000 --- a/pkg/services/tree/container.go +++ /dev/null @@ -1,90 +0,0 @@ -package tree - -import ( - "bytes" - "context" - "crypto/sha256" - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - cidSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/hashicorp/golang-lru/v2/simplelru" -) - -type containerCache struct { - sync.Mutex - nm *netmapSDK.NetMap - lru *simplelru.LRU[string, containerCacheItem] -} - -func (c *containerCache) init(size int) { - c.lru, _ = simplelru.NewLRU[string, containerCacheItem](size, nil) // no error, size is positive -} - -type containerCacheItem struct { - cnr *container.Container - local int - nodes []netmapSDK.NodeInfo -} - -const defaultContainerCacheSize = 10 - -// getContainerNodes returns nodes in the container and a position of local key in the list. -func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { - nm, err := s.nmSource.GetNetMap(ctx, 0) - if err != nil { - return nil, -1, fmt.Errorf("can't get netmap: %w", err) - } - - cnr, err := s.cnrSource.Get(ctx, cid) - if err != nil { - return nil, -1, fmt.Errorf("can't get container: %w", err) - } - - cidStr := cid.String() - - s.containerCache.Lock() - if s.containerCache.nm != nm { - s.containerCache.lru.Purge() - } else if item, ok := s.containerCache.lru.Get(cidStr); ok { - if item.cnr == cnr { - s.containerCache.Unlock() - return item.nodes, item.local, nil - } - } - s.containerCache.Unlock() - - policy := cnr.Value.PlacementPolicy() - - rawCID := make([]byte, sha256.Size) - cid.Encode(rawCID) - - cntNodes, err := nm.ContainerNodes(policy, rawCID) - if err != nil { - return nil, -1, err - } - - nodes := placement.FlattenNodes(cntNodes) - - localPos := -1 - for i := range nodes { - if bytes.Equal(nodes[i].PublicKey(), s.rawPub) { - localPos = i - break - } - } - - s.containerCache.Lock() - s.containerCache.nm = nm - s.containerCache.lru.Add(cidStr, containerCacheItem{ - cnr: cnr, - local: localPos, - nodes: nodes, - }) - s.containerCache.Unlock() - - return nodes, localPos, err -} diff --git a/pkg/services/tree/drop.go b/pkg/services/tree/drop.go deleted file mode 100644 index a9e4e2e71..000000000 --- a/pkg/services/tree/drop.go +++ /dev/null @@ -1,14 +0,0 @@ -package tree - -import ( - "context" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// DropTree drops a tree from the database. If treeID is empty, all the trees are dropped. -func (s *Service) DropTree(ctx context.Context, cid cid.ID, treeID string) error { - // The only current use-case is a container removal, where all trees should be removed. - // Thus there is no need to replicate the operation on other node. 
- return s.forest.TreeDrop(ctx, cid, treeID) -} diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go deleted file mode 100644 index e7a13827e..000000000 --- a/pkg/services/tree/getsubtree_test.go +++ /dev/null @@ -1,263 +0,0 @@ -package tree - -import ( - "context" - "errors" - "path" - "path/filepath" - "slices" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -func TestGetSubTree(t *testing.T) { - d := pilorama.CIDDescriptor{CID: cidtest.ID(), Size: 1} - treeID := "sometree" - p := pilorama.NewMemoryForest() - - tree := []struct { - path []string - id uint64 - }{ - {path: []string{"dir1"}}, - {path: []string{"dir2"}}, - {path: []string{"dir1", "sub1"}}, - {path: []string{"dir2", "sub1"}}, - {path: []string{"dir2", "sub2"}}, - {path: []string{"dir2", "sub1", "subsub1"}}, - } - - for i := range tree { - path := tree[i].path - meta := []pilorama.KeyValue{ - {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}, - } - - lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta) - require.NoError(t, err) - require.Equal(t, 1, len(lm)) - - tree[i].id = lm[0].Child - } - - testGetSubTree := func(t *testing.T, rootID uint64, depth uint32, errIndex int) []uint64 { - acc := subTreeAcc{errIndex: errIndex} - err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{ - TreeId: treeID, - RootId: []uint64{rootID}, - Depth: depth, - }, p) - if errIndex == -1 { - require.NoError(t, err) - } else { - require.ErrorIs(t, err, errSubTreeSend) - } - - // GetSubTree must return child only after is has returned the parent. - require.Equal(t, rootID, acc.seen[0].Body.NodeId[0]) - loop: - for i := 1; i < len(acc.seen); i++ { - parent := acc.seen[i].Body.ParentId - for j := range i { - if acc.seen[j].Body.NodeId[0] == parent[0] { - continue loop - } - } - require.Fail(t, "node has parent %d, but it hasn't been seen", parent) - } - - // GetSubTree must return valid meta. 
- for i := range acc.seen { - b := acc.seen[i].Body - meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId[0]) - require.NoError(t, err) - require.Equal(t, node, b.ParentId[0]) - require.Equal(t, meta.Time, b.Timestamp[0]) - require.Equal(t, metaToProto(meta.Items), b.Meta) - } - - ordered := make([]uint64, len(acc.seen)) - for i := range acc.seen { - ordered[i] = acc.seen[i].Body.NodeId[0] - } - return ordered - } - - t.Run("depth = 1, only root", func(t *testing.T) { - actual := testGetSubTree(t, 0, 1, -1) - require.Equal(t, []uint64{0}, actual) - - t.Run("custom root", func(t *testing.T) { - actual := testGetSubTree(t, tree[2].id, 1, -1) - require.Equal(t, []uint64{tree[2].id}, actual) - }) - }) - t.Run("depth = 2", func(t *testing.T) { - actual := testGetSubTree(t, 0, 2, -1) - require.Equal(t, []uint64{0, tree[0].id, tree[1].id}, actual) - - t.Run("error in the middle", func(t *testing.T) { - actual := testGetSubTree(t, 0, 2, 0) - require.Equal(t, []uint64{0}, actual) - - actual = testGetSubTree(t, 0, 2, 1) - require.Equal(t, []uint64{0, tree[0].id}, actual) - }) - }) - t.Run("depth = 0 (unrestricted)", func(t *testing.T) { - actual := testGetSubTree(t, 0, 0, -1) - expected := []uint64{ - 0, - tree[0].id, // dir1 - tree[2].id, // dir1/sub1 - tree[1].id, // dir2 - tree[3].id, // dir2/sub1 - tree[5].id, // dir2/sub1/subsub1 - tree[4].id, // dir2/sub2 - } - require.Equal(t, expected, actual) - }) -} - -func TestGetSubTreeOrderAsc(t *testing.T) { - t.Run("memory forest", func(t *testing.T) { - testGetSubTreeOrderAsc(t, pilorama.NewMemoryForest()) - }) - - t.Run("boltdb forest", func(t *testing.T) { - p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))) - require.NoError(t, p.Open(context.Background(), 0o644)) - require.NoError(t, p.Init(context.Background())) - testGetSubTreeOrderAsc(t, p) - }) -} - -func testGetSubTreeOrderAsc(t *testing.T, p pilorama.ForestStorage) { - d := pilorama.CIDDescriptor{CID: cidtest.ID(), Size: 1} - treeID := "sometree" - - tree := []struct { - path []string - id uint64 - }{ - {path: []string{"dir1"}}, - {path: []string{"dir2"}}, - {path: []string{"dir1", "sub1"}}, - {path: []string{"dir2", "sub1"}}, - {path: []string{"dir2", "sub2"}}, - {path: []string{"dir2", "sub1", "subsub1"}}, - } - - for i := range tree { - path := tree[i].path - meta := []pilorama.KeyValue{ - {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}, - } - - lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta) - require.NoError(t, err) - require.Equal(t, 1, len(lm)) - tree[i].id = lm[0].Child - } - - t.Run("total", func(t *testing.T) { - t.Skip() - acc := subTreeAcc{errIndex: -1} - err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{ - TreeId: treeID, - OrderBy: &GetSubTreeRequest_Body_Order{ - Direction: GetSubTreeRequest_Body_Order_Asc, - }, - }, p) - require.NoError(t, err) - // GetSubTree must return child only after is has returned the parent. 
- require.Equal(t, uint64(0), acc.seen[0].Body.NodeId) - - paths := make([]string, 0, len(acc.seen)) - for i := range acc.seen { - if i == 0 { - continue - } - found := false - for j := range tree { - if acc.seen[i].Body.NodeId[0] == tree[j].id { - found = true - paths = append(paths, path.Join(tree[j].path...)) - } - } - require.True(t, found, "unknown node %d %v", i, acc.seen[i].GetBody().GetNodeId()) - } - - require.True(t, slices.IsSorted(paths)) - }) - t.Run("depth=1", func(t *testing.T) { - acc := subTreeAcc{errIndex: -1} - err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{ - TreeId: treeID, - Depth: 1, - OrderBy: &GetSubTreeRequest_Body_Order{ - Direction: GetSubTreeRequest_Body_Order_Asc, - }, - }, p) - require.NoError(t, err) - require.Len(t, acc.seen, 1) - require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0]) - }) - t.Run("depth=2", func(t *testing.T) { - acc := subTreeAcc{errIndex: -1} - err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{ - TreeId: treeID, - Depth: 2, - OrderBy: &GetSubTreeRequest_Body_Order{ - Direction: GetSubTreeRequest_Body_Order_Asc, - }, - }, p) - require.NoError(t, err) - require.Len(t, acc.seen, 3) - require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0]) - require.Equal(t, uint64(0), acc.seen[1].GetBody().GetParentId()[0]) - require.Equal(t, uint64(0), acc.seen[2].GetBody().GetParentId()[0]) - }) -} - -var ( - errSubTreeSend = errors.New("send finished with error") - errSubTreeSendAfterError = errors.New("send was invoked after an error occurred") - errInvalidResponse = errors.New("send got invalid response") -) - -type subTreeAcc struct { - grpc.ServerStream // to satisfy the interface - // IDs of the seen nodes. - seen []*GetSubTreeResponse - errIndex int -} - -var _ TreeService_GetSubTreeServer = &subTreeAcc{} - -func (s *subTreeAcc) Send(r *GetSubTreeResponse) error { - b := r.GetBody() - if len(b.GetNodeId()) > 1 { - return errInvalidResponse - } - if len(b.GetParentId()) > 1 { - return errInvalidResponse - } - if len(b.GetTimestamp()) > 1 { - return errInvalidResponse - } - s.seen = append(s.seen, r) - if s.errIndex >= 0 { - if len(s.seen) == s.errIndex+1 { - return errSubTreeSend - } - if s.errIndex >= 0 && len(s.seen) > s.errIndex { - return errSubTreeSendAfterError - } - } - return nil -} diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go deleted file mode 100644 index 07503f8c3..000000000 --- a/pkg/services/tree/metrics.go +++ /dev/null @@ -1,17 +0,0 @@ -package tree - -import "time" - -type MetricsRegister interface { - AddReplicateTaskDuration(time.Duration, bool) - AddReplicateWaitDuration(time.Duration, bool) - AddSyncDuration(time.Duration, bool) - AddOperation(string, string) -} - -type defaultMetricsRegister struct{} - -func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} -func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} -func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} -func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go deleted file mode 100644 index a28651452..000000000 --- a/pkg/services/tree/options.go +++ /dev/null @@ -1,184 +0,0 @@ -package tree - -import ( - "context" - "crypto/ecdsa" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - frostfsidcore 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type ContainerSource interface { - container.Source - - DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) - - // List must return list of all the containers in the FrostFS network - // at the moment of a call and any error that does not allow fetching - // container information. - List(ctx context.Context) ([]cid.ID, error) -} - -type cfg struct { - log *logger.Logger - key *ecdsa.PrivateKey - rawPub []byte - state netmap.State - nmSource netmap.Source - cnrSource ContainerSource - frostfsidSubjectProvider frostfsidcore.SubjectProvider - forest pilorama.Forest - // replication-related parameters - replicatorChannelCapacity int - replicatorWorkerCount int - replicatorTimeout time.Duration - containerCacheSize int - authorizedKeys atomic.Pointer[[][]byte] - syncBatchSize int - syncDisabled bool - - localOverrideStorage policyengine.LocalOverrideStorage - morphChainStorage policyengine.MorphRuleChainStorageReader - - metrics MetricsRegister - ds *net.DialerSource -} - -// Option represents configuration option for a tree service. -type Option func(*cfg) - -// WithContainerSource sets a container source for a tree service. -// This option is required. -func WithContainerSource(src ContainerSource) Option { - return func(c *cfg) { - c.cnrSource = src - } -} - -func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option { - return func(c *cfg) { - c.frostfsidSubjectProvider = provider - } -} - -// WithNetmapSource sets a netmap source for a tree service. -// This option is required. -func WithNetmapSource(src netmap.Source) Option { - return func(c *cfg) { - c.nmSource = src - } -} - -// WithPrivateKey sets a netmap source for a tree service. -// This option is required. -func WithPrivateKey(key *ecdsa.PrivateKey) Option { - return func(c *cfg) { - c.key = key - c.rawPub = (*keys.PublicKey)(&key.PublicKey).Bytes() - } -} - -// WithLogger sets logger for a tree service. -func WithLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.log = log - } -} - -// WithStorage sets tree storage for a service. -func WithStorage(s pilorama.Forest) Option { - return func(c *cfg) { - c.forest = s - } -} - -func WithReplicationChannelCapacity(n int) Option { - return func(c *cfg) { - if n > 0 { - c.replicatorChannelCapacity = n - } - } -} - -func WithReplicationWorkerCount(n int) Option { - return func(c *cfg) { - if n > 0 { - c.replicatorWorkerCount = n - } - } -} - -func WithSyncBatchSize(n int) Option { - return func(c *cfg) { - c.syncBatchSize = n - } -} - -func WithSyncDisabled(d bool) Option { - return func(c *cfg) { - c.syncDisabled = d - } -} - -func WithContainerCacheSize(n int) Option { - return func(c *cfg) { - if n > 0 { - c.containerCacheSize = n - } - } -} - -func WithReplicationTimeout(t time.Duration) Option { - return func(c *cfg) { - if t > 0 { - c.replicatorTimeout = t - } - } -} - -func WithMetrics(v MetricsRegister) Option { - return func(c *cfg) { - c.metrics = v - } -} - -// WithAuthorizedKeys returns option to add list of public -// keys that have rights to use Tree service. 
-func WithAuthorizedKeys(keys keys.PublicKeys) Option { - return func(c *cfg) { - c.authorizedKeys.Store(fromPublicKeys(keys)) - } -} - -func WithAPELocalOverrideStorage(localOverrideStorage policyengine.LocalOverrideStorage) Option { - return func(c *cfg) { - c.localOverrideStorage = localOverrideStorage - } -} - -func WithAPEMorphRuleStorage(morphRuleStorage policyengine.MorphRuleChainStorageReader) Option { - return func(c *cfg) { - c.morphChainStorage = morphRuleStorage - } -} - -func WithNetmapState(state netmap.State) Option { - return func(c *cfg) { - c.state = state - } -} - -func WithDialerSource(ds *net.DialerSource) Option { - return func(c *cfg) { - c.ds = ds - } -} diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go deleted file mode 100644 index 8f21686df..000000000 --- a/pkg/services/tree/qos.go +++ /dev/null @@ -1,101 +0,0 @@ -package tree - -import ( - "context" - - "google.golang.org/grpc" -) - -var _ TreeServiceServer = (*ioTagAdjust)(nil) - -type AdjustIOTag interface { - AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context -} - -type ioTagAdjust struct { - s TreeServiceServer - a AdjustIOTag -} - -func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer { - return &ioTagAdjust{ - s: s, - a: a, - } -} - -func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Add(ctx, req) -} - -func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.AddByPath(ctx, req) -} - -func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Apply(ctx, req) -} - -func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.GetNodeByPath(ctx, req) -} - -func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { - ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) - return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{ - sender: srv, - ServerStream: srv, - ctxF: func() context.Context { return ctx }, - }) -} - -func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { - ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) - return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{ - sender: srv, - ServerStream: srv, - ctxF: func() context.Context { return ctx }, - }) -} - -func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Healthcheck(ctx, req) -} - -func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Move(ctx, req) -} - -func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Remove(ctx, req) -} - -func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, 
req.GetSignature().GetKey()) - return i.s.TreeList(ctx, req) -} - -type qosSend[T any] interface { - Send(T) error -} - -type qosServerWrapper[T any] struct { - grpc.ServerStream - sender qosSend[T] - ctxF func() context.Context -} - -func (w *qosServerWrapper[T]) Send(resp T) error { - return w.sender.Send(resp) -} - -func (w *qosServerWrapper[T]) Context() context.Context { - return w.ctxF() -} diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go deleted file mode 100644 index 647f8cb30..000000000 --- a/pkg/services/tree/redirect.go +++ /dev/null @@ -1,77 +0,0 @@ -package tree - -import ( - "bytes" - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -var errNoSuitableNode = errors.New("no node was found to execute the request") - -func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { - var resp *Resp - var outErr error - err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = callback(c, fCtx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr -} - -// forEachNode executes callback for each node in the container until true is returned. -// Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { - for _, n := range cntNodes { - if bytes.Equal(n.PublicKey(), s.rawPub) { - return nil - } - } - - var called bool - for _, n := range cntNodes { - var stop bool - for endpoint := range n.NetworkEndpoints() { - stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { - called = true - return f(fCtx, c) - }) - if called { - break - } - } - if stop { - return nil - } - } - if !called { - return errNoSuitableNode - } - return nil -} - -func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false - } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - return f(ctx, c) -} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go deleted file mode 100644 index bc6e26fa7..000000000 --- a/pkg/services/tree/replicator.go +++ /dev/null @@ -1,245 +0,0 @@ -package tree - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" -) - 
-type movePair struct { - cid cidSDK.ID - treeID string - op *pilorama.Move - excPub []byte -} - -type replicationTask struct { - n netmapSDK.NodeInfo - req *ApplyRequest -} - -type applyOp struct { - treeID string - cid cidSDK.ID - pilorama.Move -} - -const ( - defaultReplicatorCapacity = 64 - defaultReplicatorWorkerCount = 64 - defaultReplicatorSendTimeout = time.Second * 5 - defaultSyncBatchSize = 1000 -) - -func (s *Service) localReplicationWorker(ctx context.Context) { - for { - select { - case <-s.closeCh: - return - case op := <-s.replicateLocalCh: - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationOperation", - trace.WithAttributes( - attribute.String("tree_id", op.treeID), - attribute.String("container_id", op.cid.EncodeToString()), - ), - ) - - err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) - if err != nil { - s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation, - zap.Error(err)) - } - span.End() - } - } -} - -func (s *Service) replicationWorker(ctx context.Context) { - for { - select { - case <-s.closeCh: - return - case task := <-s.replicationTasks: - _ = s.ReplicateTreeOp(ctx, task.n, task.req) - } - } -} - -func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *ApplyRequest) error { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - ), - ) - defer span.End() - - start := time.Now() - - var lastErr error - var lastAddr string - - for addr := range n.NetworkEndpoints() { - lastAddr = addr - lastErr = s.apply(ctx, n, addr, req) - if lastErr == nil { - break - } - } - - if lastErr != nil { - if errors.Is(lastErr, errRecentlyFailed) { - s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, - zap.String("last_error", lastErr.Error())) - } else { - s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, - zap.String("last_error", lastErr.Error()), - zap.String("address", lastAddr), - zap.String("key", hex.EncodeToString(n.PublicKey()))) - } - s.metrics.AddReplicateTaskDuration(time.Since(start), false) - return lastErr - } - s.metrics.AddReplicateTaskDuration(time.Since(start), true) - return nil -} - -func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - - c, err := s.cache.get(ctx, addr) - if err != nil { - return fmt.Errorf("can't create client: %w", err) - } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, err = c.Apply(ctx, req) - cancel() - return err -} - -func (s *Service) replicateLoop(ctx context.Context) { - for range s.replicatorWorkerCount { - go s.replicationWorker(ctx) - go s.localReplicationWorker(ctx) - } - defer func() { - for len(s.replicationTasks) != 0 { - <-s.replicationTasks - } - }() - - for { - select { - case <-s.closeCh: - return - case <-ctx.Done(): - return - case op := <-s.replicateCh: - start := time.Now() - err := s.replicate(ctx, op) - if err != nil { - s.log.Error(ctx, logs.TreeErrorDuringReplication, - zap.Error(err), - zap.Stringer("cid", op.cid), - zap.String("treeID", op.treeID)) - } - s.metrics.AddReplicateWaitDuration(time.Since(start), err == nil) - } - } -} - -func (s *Service) replicate(ctx context.Context, op 
movePair) error { - req := newApplyRequest(&op) - err := SignMessage(req, s.key) - if err != nil { - return fmt.Errorf("can't sign data: %w", err) - } - - nodes, localIndex, err := s.getContainerNodes(ctx, op.cid) - if err != nil { - return fmt.Errorf("can't get container nodes: %w", err) - } - - for i := range nodes { - if i != localIndex && !bytes.Equal(nodes[i].PublicKey(), op.excPub) { - s.replicationTasks <- replicationTask{nodes[i], req} - } - } - return nil -} - -func (s *Service) replicateToRemoteNode(ctx context.Context, op movePair, - nodes []netmapSDK.NodeInfo, localIndex int, -) ([]byte, error) { - req := newApplyRequest(&op) - err := SignMessage(req, s.key) - if err != nil { - return nil, fmt.Errorf("can't sign data: %w", err) - } - - var errMulti error - for i := range nodes { - if i != localIndex { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - err := s.ReplicateTreeOp(ctx, nodes[i], req) - if err != nil { - errMulti = errors.Join(errMulti, err) - continue - } - return nodes[i].PublicKey(), nil - } - } - return nil, errMulti -} - -func (s *Service) pushToQueue(cid cidSDK.ID, treeID string, op *pilorama.Move, excPub []byte) { - select { - case s.replicateCh <- movePair{ - cid: cid, - treeID: treeID, - op: op, - excPub: excPub, - }: - default: - } -} - -func newApplyRequest(op *movePair) *ApplyRequest { - rawCID := make([]byte, sha256.Size) - op.cid.Encode(rawCID) - - return &ApplyRequest{ - Body: &ApplyRequest_Body{ - ContainerId: rawCID, - TreeId: op.treeID, - Operation: &LogMove{ - ParentId: op.op.Parent, - Meta: op.op.Bytes(), - ChildId: op.op.Child, - }, - }, - } -} diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go deleted file mode 100644 index 81aa98b4d..000000000 --- a/pkg/services/tree/service.go +++ /dev/null @@ -1,835 +0,0 @@ -package tree - -import ( - "bytes" - "context" - "errors" - "fmt" - "slices" - "sync" - "sync/atomic" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Service represents tree-service capable of working with multiple -// instances of CRDT trees. -type Service struct { - cfg - - cache clientCache - replicateCh chan movePair - replicateLocalCh chan applyOp - replicationTasks chan replicationTask - closeCh chan struct{} - containerCache containerCache - - syncChan chan struct{} - syncPool *ants.Pool - - initialSyncDone atomic.Bool - - apeChecker checkercore.CheckCore - - // cnrMap contains existing (used) container IDs. - cnrMap map[cidSDK.ID]struct{} - // cnrMapMtx protects cnrMap - cnrMapMtx sync.Mutex -} - -var _ TreeServiceServer = (*Service)(nil) - -// New creates new tree service instance. 
-func New(opts ...Option) *Service { - var s Service - s.containerCacheSize = defaultContainerCacheSize - s.replicatorChannelCapacity = defaultReplicatorCapacity - s.replicatorWorkerCount = defaultReplicatorWorkerCount - s.replicatorTimeout = defaultReplicatorSendTimeout - s.syncBatchSize = defaultSyncBatchSize - s.metrics = defaultMetricsRegister{} - s.authorizedKeys.Store(&[][]byte{}) - - for i := range opts { - opts[i](&s.cfg) - } - - if s.log == nil { - s.log = logger.NewLoggerWrapper(zap.NewNop()) - } - - s.cache.init(s.key, s.ds) - s.closeCh = make(chan struct{}) - s.replicateCh = make(chan movePair, s.replicatorChannelCapacity) - s.replicateLocalCh = make(chan applyOp) - s.replicationTasks = make(chan replicationTask, s.replicatorWorkerCount) - s.containerCache.init(s.containerCacheSize) - s.cnrMap = make(map[cidSDK.ID]struct{}) - s.syncChan = make(chan struct{}) - s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount) - - s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state) - - return &s -} - -// Start starts the service. -func (s *Service) Start(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) - go s.replicateLoop(ctx) - if s.syncDisabled { - s.initialSyncDone.Store(true) - return - } - go s.syncLoop(ctx) - - select { - case <-s.closeCh: - case <-ctx.Done(): - default: - // initial sync - s.syncChan <- struct{}{} - } -} - -// Shutdown shutdowns the service. -func (s *Service) Shutdown() { - close(s.closeCh) - s.syncPool.Release() -} - -func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { - defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return nil, err - } - - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add) - } - - d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ - Parent: b.GetParentId(), - Child: pilorama.RootID, - Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())}, - }) - if err != nil { - return nil, err - } - - excPub, err := s.replicateToRemoteNode(ctx, movePair{ - cid: cid, - treeID: b.GetTreeId(), - op: log, - }, ns, pos) - if err != nil { - return nil, err - } - s.pushToQueue(cid, b.GetTreeId(), log, excPub) - - return &AddResponse{ - Body: &AddResponse_Body{ - NodeId: log.Child, - }, - }, nil -} - -func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { - defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return nil, err - } - - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath) - } - - meta := protoToMeta(b.GetMeta()) - - 
attr := b.GetPathAttribute() - if len(attr) == 0 { - attr = pilorama.AttributeFilename - } - - d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - logs, err := s.forest.TreeAddByPath(ctx, d, b.GetTreeId(), attr, b.GetPath(), meta) - if err != nil { - return nil, err - } - - for i := range logs { - excPub, err := s.replicateToRemoteNode(ctx, movePair{ - cid: cid, - treeID: b.GetTreeId(), - op: &logs[i], - }, ns, pos) - if err != nil { - return nil, err - } - s.pushToQueue(cid, b.GetTreeId(), &logs[i], excPub) - } - - nodes := make([]uint64, len(logs)) - nodes[0] = logs[len(logs)-1].Child - for i, l := range logs[:len(logs)-1] { - nodes[i+1] = l.Child - } - - return &AddByPathResponse{ - Body: &AddByPathResponse_Body{ - Nodes: nodes, - }, - }, nil -} - -func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { - defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return nil, err - } - - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove) - } - - if b.GetNodeId() == pilorama.RootID { - return nil, fmt.Errorf("node with ID %d is root and can't be removed", b.GetNodeId()) - } - - d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ - Parent: pilorama.TrashID, - Child: b.GetNodeId(), - }) - if err != nil { - return nil, err - } - - excPub, err := s.replicateToRemoteNode(ctx, movePair{ - cid: cid, - treeID: b.GetTreeId(), - op: log, - }, ns, pos) - if err != nil { - return nil, err - } - s.pushToQueue(cid, b.GetTreeId(), log, excPub) - return new(RemoveResponse), nil -} - -// Move applies client operation to the specified tree and pushes in queue -// for replication on other nodes. 
-func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { - defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return nil, err - } - - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move) - } - - if b.GetNodeId() == pilorama.RootID { - return nil, fmt.Errorf("node with ID %d is root and can't be moved", b.GetNodeId()) - } - - d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ - Parent: b.GetParentId(), - Child: b.GetNodeId(), - Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())}, - }) - if err != nil { - return nil, err - } - - excPub, err := s.replicateToRemoteNode(ctx, movePair{ - cid: cid, - treeID: b.GetTreeId(), - op: log, - }, ns, pos) - if err != nil { - return nil, err - } - s.pushToQueue(cid, b.GetTreeId(), log, excPub) - return new(MoveResponse), nil -} - -func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { - defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return nil, err - } - - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath) - } - - attr := b.GetPathAttribute() - if len(attr) == 0 { - attr = pilorama.AttributeFilename - } - - nodes, err := s.forest.TreeGetByPath(ctx, cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly()) - if err != nil { - return nil, err - } - - info := make([]GetNodeByPathResponse_Info, 0, len(nodes)) - for _, node := range nodes { - m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node) - if err != nil { - return nil, err - } - - var x GetNodeByPathResponse_Info - x.ParentId = parent - x.NodeId = node - x.Timestamp = m.Time - if b.GetAllAttributes() { - x.Meta = metaToProto(m.Items) - } else { - var metaValue []KeyValue - for _, kv := range m.Items { - if slices.Contains(b.GetAttributes(), kv.Key) { - metaValue = append(metaValue, KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) - } - } - x.Meta = metaValue - } - info = append(info, x) - } - - return &GetNodeByPathResponse{ - Body: &GetNodeByPathResponse_Body{ - Nodes: info, - }, - }, nil -} - -func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { - defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) - if !s.initialSyncDone.Load() { - return ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(b.GetContainerId()); err != nil { - return err - } - - err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) - if err != nil { - return err - } - - ns, pos, err := 
s.getContainerNodes(srv.Context(), cid) - if err != nil { - return err - } - if pos < 0 { - var cli TreeService_GetSubTreeClient - var outErr error - err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(fCtx, req) - return true - }) - if err != nil { - return err - } else if outErr != nil { - return outErr - } - for resp, err := cli.Recv(); err == nil; resp, err = cli.Recv() { - if err := srv.Send(resp); err != nil { - return err - } - } - return nil - } - - return getSubTree(srv.Context(), srv, cid, b, s.forest) -} - -type stackItem struct { - values []pilorama.MultiNodeInfo - parent pilorama.MultiNode - last *pilorama.Cursor -} - -func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { - const batchSize = 1000 - - // For backward compatibility. - rootIDs := b.GetRootId() - if len(rootIDs) == 0 { - rootIDs = []uint64{0} - } - - // Traverse the tree in a DFS manner. Because we need to support arbitrary depth, - // recursive implementation is not suitable here, so we maintain explicit stack. - var ms []pilorama.KeyValue - var ps []uint64 - var ts []uint64 - for _, rootID := range rootIDs { - m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID) - if err != nil { - return err - } - if ms == nil { - ms = m.Items - } else if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") - } - ts = append(ts, m.Time) - ps = append(ps, p) - } - - stack := []stackItem{{ - values: []pilorama.MultiNodeInfo{{ - Children: rootIDs, - Timestamps: ts, - Meta: ms, - Parents: ps, - }}, - parent: ps, - }} - - for { - if len(stack) == 0 { - break - } else if item := &stack[len(stack)-1]; len(item.values) == 0 { - if len(stack) == 1 { - break - } - - var err error - item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) - if err != nil { - return err - } - - if len(item.values) == 0 { - stack = stack[:len(stack)-1] - continue - } - } - - node, err := stackPopAndSend(stack, srv) - if err != nil { - return err - } - - if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() { - children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.Children, nil, batchSize) - if err != nil { - return err - } - if len(children) != 0 { - stack = append(stack, stackItem{ - values: children, - parent: node.Children, - last: last, - }) - } - } - } - return nil -} - -func stackPopAndSend(stack []stackItem, srv TreeService_GetSubTreeServer) (pilorama.MultiNodeInfo, error) { - node := stack[len(stack)-1].values[0] - stack[len(stack)-1].values = stack[len(stack)-1].values[1:] - - return node, srv.Send(&GetSubTreeResponse{ - Body: &GetSubTreeResponse_Body{ - NodeId: node.Children, - ParentId: node.Parents, - Timestamp: node.Timestamps, - Meta: metaToProto(node.Meta), - }, - }) -} - -func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { - if b.GetOrderBy().GetDirection() == GetSubTreeRequest_Body_Order_Asc { - return getSortedSubTree(ctx, srv, cid, b, forest) - } - - var rootID uint64 - if len(b.GetRootId()) > 0 { - rootID = b.GetRootId()[0] - } - - // Traverse the tree in a DFS manner. Because we need to support arbitrary depth, - // recursive implementation is not suitable here, so we maintain explicit stack. 
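(Each level of the explicit stack holds the not-yet-sent children of one node, so len(stack) equals the current traversal depth; that is the value compared against b.GetDepth() below.)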
- m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID) - if err != nil { - return err - } - stack := [][]pilorama.NodeInfo{{{ - ID: rootID, - Meta: m, - ParentID: p, - }}} - - for { - if len(stack) == 0 { - break - } else if len(stack[len(stack)-1]) == 0 { - stack = stack[:len(stack)-1] - continue - } - - node := stack[len(stack)-1][0] - stack[len(stack)-1] = stack[len(stack)-1][1:] - - err = srv.Send(&GetSubTreeResponse{ - Body: &GetSubTreeResponse_Body{ - NodeId: []uint64{node.ID}, - ParentId: []uint64{node.ParentID}, - Timestamp: []uint64{node.Meta.Time}, - Meta: metaToProto(node.Meta.Items), - }, - }) - if err != nil { - return err - } - - if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() { - children, err := forest.TreeGetChildren(ctx, cid, b.GetTreeId(), node.ID) - if err != nil { - return err - } - children, err = sortByFilename(children, b.GetOrderBy().GetDirection()) - if err != nil { - return err - } - if len(children) != 0 { - stack = append(stack, children) - } - } - } - return nil -} - -func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Direction) ([]pilorama.NodeInfo, error) { - switch d { - case GetSubTreeRequest_Body_Order_None: - return nodes, nil - case GetSubTreeRequest_Body_Order_Asc: - if len(nodes) == 0 { - return nodes, nil - } - slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int { - return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename)) - }) - return nodes, nil - default: - return nil, fmt.Errorf("unsupported order direction: %s", d.String()) - } -} - -// Apply locally applies operation from the remote node to the tree. -func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) - err := verifyMessage(req) - if err != nil { - return nil, err - } - - var cid cidSDK.ID - if err := cid.Decode(req.GetBody().GetContainerId()); err != nil { - return nil, err - } - - key := req.GetSignature().GetKey() - - _, pos, _, err := s.getContainerInfo(ctx, cid, key) - if err != nil { - return nil, err - } - if pos < 0 { - return nil, errors.New("`Apply` request must be signed by a container node") - } - - op := req.GetBody().GetOperation() - - var meta pilorama.Meta - if err := meta.FromBytes(op.GetMeta()); err != nil { - return nil, fmt.Errorf("can't parse meta-information: %w", err) - } - - select { - case s.replicateLocalCh <- applyOp{ - treeID: req.GetBody().GetTreeId(), - cid: cid, - Move: pilorama.Move{ - Parent: op.GetParentId(), - Child: op.GetChildId(), - Meta: meta, - }, - }: - default: - } - return &ApplyResponse{Body: &ApplyResponse_Body{}, Signature: &Signature{}}, nil -} - -func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { - defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) - if !s.initialSyncDone.Load() { - return ErrAlreadySyncing - } - - b := req.GetBody() - - var cid cidSDK.ID - if err := cid.Decode(req.GetBody().GetContainerId()); err != nil { - return err - } - - ns, pos, err := s.getContainerNodes(srv.Context(), cid) - if err != nil { - return err - } - if pos < 0 { - var cli TreeService_GetOpLogClient - var outErr error - err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(fCtx, req) - return true - }) - if err != nil { - return err - } else if outErr != nil { - return outErr - } - for resp, err := cli.Recv(); err == nil; 
resp, err = cli.Recv() { - if err := srv.Send(resp); err != nil { - return err - } - } - return nil - } - - h := b.GetHeight() - lastHeight, err := s.forest.TreeHeight(srv.Context(), cid, b.GetTreeId()) - if err != nil { - return err - } - for { - lm, err := s.forest.TreeGetOpLog(srv.Context(), cid, b.GetTreeId(), h) - if err != nil || lm.Time == 0 || lastHeight < lm.Time { - return err - } - - err = srv.Send(&GetOpLogResponse{ - Body: &GetOpLogResponse_Body{ - Operation: &LogMove{ - ParentId: lm.Parent, - Meta: lm.Bytes(), - ChildId: lm.Child, - }, - }, - }) - if err != nil { - return err - } - - h = lm.Time + 1 - } -} - -func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { - defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - var cid cidSDK.ID - - err := cid.Decode(req.GetBody().GetContainerId()) - if err != nil { - return nil, err - } - - // just verify the signature, not ACL checks - // since tree ID list is not protected like - // the containers list - err = verifyMessage(req) - if err != nil { - return nil, err - } - - ns, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, err - } - if pos < 0 { - return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList) - } - - ids, err := s.forest.TreeList(ctx, cid) - if err != nil { - return nil, err - } - - return &TreeListResponse{ - Body: &TreeListResponse_Body{ - Ids: ids, - }, - }, nil -} - -func protoToMeta(arr []KeyValue) []pilorama.KeyValue { - meta := make([]pilorama.KeyValue, len(arr)) - for i, kv := range arr { - meta[i].Key = kv.GetKey() - meta[i].Value = kv.GetValue() - } - return meta -} - -func metaToProto(arr []pilorama.KeyValue) []KeyValue { - meta := make([]KeyValue, len(arr)) - for i, kv := range arr { - meta[i] = KeyValue{ - Key: kv.Key, - Value: kv.Value, - } - } - return meta -} - -// getContainerInfo returns the list of container nodes, position in the container for the node -// with pub key and total amount of nodes in all replicas. -func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { - cntNodes, _, err := s.getContainerNodes(ctx, cid) - if err != nil { - return nil, 0, 0, err - } - - for i, node := range cntNodes { - if bytes.Equal(node.PublicKey(), pub) { - return cntNodes, i, len(cntNodes), nil - } - } - return cntNodes, -1, len(cntNodes), nil -} - -func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) { - if !s.initialSyncDone.Load() { - return nil, ErrAlreadySyncing - } - - return new(HealthcheckResponse), nil -} - -func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { - s.authorizedKeys.Store(fromPublicKeys(newKeys)) -} - -func fromPublicKeys(keys keys.PublicKeys) *[][]byte { - buff := make([][]byte, len(keys)) - for i, k := range keys { - buff[i] = k.Bytes() - } - return &buff -} diff --git a/pkg/services/tree/service.proto b/pkg/services/tree/service.proto deleted file mode 100644 index 88bf0bca4..000000000 --- a/pkg/services/tree/service.proto +++ /dev/null @@ -1,374 +0,0 @@ -/** - * Service for working with CRDT tree. 
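 *
 * The client-facing RPCs (Add, AddByPath, Remove, Move, GetNodeByPath,
 * GetSubTree, TreeList) operate on a single tree, while the synchronization
 * RPCs (Apply, GetOpLog) replicate the underlying CRDT operation log between
 * container nodes; the heights exchanged via GetOpLog are the logical
 * timestamps of individual log moves.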
- */
-syntax = "proto3";
-
-package tree;
-
-import "pkg/services/tree/types.proto";
-
-option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree";
-
-service TreeService {
-  /* Client API */
-
-  // Client methods are mapped to the object RPC:
-  //  [ Add, AddByPath, Remove, Move ] -> PUT;
-  //  [ GetNodeByPath, GetSubTree ] -> GET.
-  // One of the following must be true:
-  // - a signer passes non-extended basic ACL;
-  // - a signer passes extended basic ACL AND a bearer token is
-  //   attached AND the basic ACL allows attaching a bearer token
-  //   to the GET/PUT operation AND the eACL table in the bearer contains
-  //   an explicit rule allowing the signer's key (or its role)
-  //   for the GET/PUT operation;
-  // - a signer passes extended basic ACL AND the extension
-  //   contains an explicit rule allowing the signer's key (or its role)
-  //   for the GET/PUT operation.
-  // Otherwise, the request is denied.
-
-  // Add adds a new node to the tree. Invoked by a client.
-  rpc Add(AddRequest) returns (AddResponse);
-  // AddByPath adds a new node to the tree by path. Invoked by a client.
-  rpc AddByPath(AddByPathRequest) returns (AddByPathResponse);
-  // Remove removes a node from the tree. Invoked by a client.
-  rpc Remove(RemoveRequest) returns (RemoveResponse);
-  // Move moves a node from one parent to another. Invoked by a client.
-  rpc Move(MoveRequest) returns (MoveResponse);
-  // GetNodeByPath returns the list of IDs corresponding to a specific filepath.
-  rpc GetNodeByPath(GetNodeByPathRequest) returns (GetNodeByPathResponse);
-  // GetSubTree returns the subtree corresponding to a specific node.
-  rpc GetSubTree(GetSubTreeRequest) returns (stream GetSubTreeResponse);
-  // TreeList returns the list of existing trees in the container.
-  rpc TreeList(TreeListRequest) returns (TreeListResponse);
-
-  /* Synchronization API */
-
-  // Apply pushes a log operation from another node to the current one.
-  // The request must be signed by a container node.
-  rpc Apply(ApplyRequest) returns (ApplyResponse);
-  // GetOpLog returns a stream of logged operations starting from some height.
-  rpc GetOpLog(GetOpLogRequest) returns (stream GetOpLogResponse);
-  // Healthcheck is a dummy RPC to check service availability.
-  rpc Healthcheck(HealthcheckRequest) returns (HealthcheckResponse);
-}
-
-message AddRequest {
-  message Body {
-    // Container ID in V2 format.
-    bytes container_id = 1;
-    // The name of the tree.
-    string tree_id = 2;
-    // ID of the parent to attach the node to.
-    uint64 parent_id = 3;
-    // Key-value pairs with meta information.
-    repeated KeyValue meta = 4;
-    // Bearer token in V2 format.
-    bytes bearer_token = 5;
-  }
-
-  // Request body.
-  Body body = 1;
-  // Request signature.
-  Signature signature = 2;
-}
-
-message AddResponse {
-  message Body {
-    // ID of the created node.
-    uint64 node_id = 1;
-  }
-
-  // Response body.
-  Body body = 1;
-  // Response signature.
-  Signature signature = 2;
-};
-
-message AddByPathRequest {
-  message Body {
-    // Container ID in V2 format.
-    bytes container_id = 1;
-    // The name of the tree.
-    string tree_id = 2;
-    // Attribute to build the path with. Default: "FileName".
-    string path_attribute = 3;
-    // List of path components.
-    repeated string path = 4;
-    // Node meta-information.
-    repeated KeyValue meta = 5;
-    // Bearer token in V2 format.
-    bytes bearer_token = 6;
-  }
-
-  // Request body.
-  Body body = 1;
-  // Request signature.
-  Signature signature = 2;
-}
-
-message AddByPathResponse {
-  message Body {
-    // List of all created nodes. The first one is the leaf.
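    // The remaining entries are the intermediate nodes that were implicitly
    // created for missing path components, in creation order.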
- repeated uint64 nodes = 1; - // ID of the parent node where new nodes were attached. - uint64 parent_id = 2; - } - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message RemoveRequest { - message Body { - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // ID of the node to remove. - uint64 node_id = 3; - // Bearer token in V2 format. - bytes bearer_token = 4; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message RemoveResponse { - message Body {} - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message MoveRequest { - message Body { - // TODO import neo.fs.v2.refs.ContainerID directly. - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // ID of the new parent. - uint64 parent_id = 3; - // ID of the node to move. - uint64 node_id = 4; - // Node meta-information. - repeated KeyValue meta = 5; - // Bearer token in V2 format. - bytes bearer_token = 6; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message MoveResponse { - message Body {} - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message GetNodeByPathRequest { - message Body { - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // Attribute to build path with. Default: "FileName". - string path_attribute = 3; - // List of path components. - repeated string path = 4; - // List of attributes to include in response. - repeated string attributes = 5; - // Flag to return only the latest version of node. - bool latest_only = 6; - // Flag to return all stored attributes. - bool all_attributes = 7; - // Bearer token in V2 format. - bytes bearer_token = 8; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message GetNodeByPathResponse { - // Information about a single tree node. - message Info { - // Node ID. - uint64 node_id = 1; - // Timestamp of the last operation with the node. - uint64 timestamp = 2; - // Node meta-information. - repeated KeyValue meta = 3; - // Parent ID. - uint64 parent_id = 4; - } - message Body { - // List of nodes stored by path. - repeated Info nodes = 1; - } - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message GetSubTreeRequest { - message Body { - message Order { - enum Direction { - None = 0; - Asc = 1; - } - Direction direction = 1; - } - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // IDs of the root nodes of a subtree forest. - repeated uint64 root_id = 3 [ packed = false ]; - // Optional depth of the traversal. Zero means return only root. - // Maximum depth is 10. - uint32 depth = 4; - // Bearer token in V2 format. - bytes bearer_token = 5; - // Result ordering. - Order order_by = 6; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message GetSubTreeResponse { - message Body { - // ID of the node. - repeated uint64 node_id = 1 [ packed = false ]; - // ID of the parent. - repeated uint64 parent_id = 2 [ packed = false ]; - // Time node was first added to a tree. - repeated uint64 timestamp = 3 [ packed = false ]; - // Node meta-information. 
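    // node_id, parent_id and timestamp are parallel arrays: index i of each
    // refers to the same node. Several sibling nodes that share meta may be
    // grouped into a single response message.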
- repeated KeyValue meta = 4; - } - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message TreeListRequest { - message Body { - // Container ID in V2 format. - bytes container_id = 1; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message TreeListResponse { - message Body { - // Tree IDs. - repeated string ids = 1; - } - - // Response body. - Body body = 1; - Signature signature = 2; -} - -message ApplyRequest { - message Body { - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // Operation to be applied. - LogMove operation = 3; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message ApplyResponse { - message Body {} - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message GetOpLogRequest { - message Body { - // Container ID in V2 format. - bytes container_id = 1; - // The name of the tree. - string tree_id = 2; - // Starting height to return logs from. - uint64 height = 3; - // Amount of operations to return. - uint64 count = 4; - } - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} - -message GetOpLogResponse { - message Body { - // Operation on a tree. - LogMove operation = 1; - } - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message HealthcheckResponse { - message Body {} - - // Response body. - Body body = 1; - // Response signature. - Signature signature = 2; -}; - -message HealthcheckRequest { - message Body {} - - // Request body. - Body body = 1; - // Request signature. - Signature signature = 2; -} diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go deleted file mode 100644 index 88d002621..000000000 --- a/pkg/services/tree/service_frostfs.pb.go +++ /dev/null @@ -1,8847 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. - -package tree - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - protowire "google.golang.org/protobuf/encoding/protowire" - strconv "strconv" -) - -type AddRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - ParentId uint64 `json:"parentId"` - Meta []KeyValue `json:"meta"` - BearerToken []byte `json:"bearerToken"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddRequest_Body)(nil) - _ json.Marshaler = (*AddRequest_Body)(nil) - _ json.Unmarshaler = (*AddRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
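The generated code that follows repeats one pattern per message: StableSize computes the exact wire size, EmitProtobuf writes the non-zero fields with easyproto, and MarshalProtobuf/UnmarshalProtobuf wrap the two with a pooled marshaler. A minimal round-trip sketch, with hypothetical field values and the import path taken from the go_package option above:

package main

import (
	"fmt"

	tree "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
)

func main() {
	body := &tree.AddRequest_Body{
		TreeId:   "version", // hypothetical tree name
		ParentId: 0,
		Meta:     []tree.KeyValue{{Key: "FileName", Value: []byte("a.txt")}},
	}

	buf := body.MarshalProtobuf(nil) // writes exactly body.StableSize() bytes

	var got tree.AddRequest_Body
	if err := got.UnmarshalProtobuf(buf); err != nil {
		panic(err)
	}
	fmt.Println(got.GetTreeId(), len(buf) == body.StableSize())
}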
-func (x *AddRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.UInt64Size(3, x.ParentId) - for i := range x.Meta { - size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i]) - } - size += proto.BytesSize(5, x.BearerToken) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.ParentId != 0 { - mm.AppendUint64(3, x.ParentId) - } - for i := range x.Meta { - x.Meta[i].EmitProtobuf(mm.AppendMessage(4)) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(5, x.BearerToken) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = data - case 4: // Meta - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = append(x.Meta, KeyValue{}) - ff := &x.Meta[len(x.Meta)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 5: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - } - } - return nil -} -func (x *AddRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *AddRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *AddRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *AddRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *AddRequest_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} -func (x *AddRequest_Body) SetParentId(v uint64) { - x.ParentId = v -} -func (x *AddRequest_Body) GetMeta() []KeyValue { - if x != nil { - return x.Meta - } - return nil -} -func (x *AddRequest_Body) SetMeta(v []KeyValue) { - x.Meta = v -} -func (x *AddRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *AddRequest_Body) SetBearerToken(v []byte) { - x.BearerToken = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *AddRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Meta { - if i != 0 { - out.RawByte(',') - } - x.Meta[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"bearerToken\":" - out.RawString(prefix) - if x.BearerToken != nil { - out.Base64Bytes(x.BearerToken) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "parentId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ParentId = f - } - case "meta": - { - var f KeyValue - var list []KeyValue - in.Delim('[') - for !in.IsDelim(']') { - f = KeyValue{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Meta = list - in.Delim(']') - } - case "bearerToken": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.BearerToken = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddRequest struct { - Body *AddRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddRequest)(nil) - _ encoding.ProtoUnmarshaler = (*AddRequest)(nil) - _ json.Marshaler = (*AddRequest)(nil) - _ json.Unmarshaler = (*AddRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
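One caveat before the request wrapper below: the generator emits the doc comments for SignedDataSize and ReadSignedData transposed. In the code itself, SignedDataSize reports the size of the data to be signed (the body's stable size), and ReadSignedData marshals the body, which is exactly the payload a signature must cover. A sketch of what that enables, with the actual signing (frostfs crypto helpers) omitted:

package main

import (
	"fmt"

	tree "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
)

func main() {
	req := &tree.AddRequest{Body: &tree.AddRequest_Body{TreeId: "t"}}

	payload, _ := req.ReadSignedData(nil)             // re-marshals only the body
	fmt.Println(len(payload) == req.SignedDataSize()) // true: the signature covers the body alone
}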
-func (x *AddRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddRequest) GetBody() *AddRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddRequest) SetBody(v *AddRequest_Body) { - x.Body = v -} -func (x *AddRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
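The JSON form renders uint64 values as quoted strings (so they survive JavaScript's float64 numbers) and byte fields as base64, as the easyjson writers above show; the lexer accepts quoted numbers on the way back in. A quick round trip:

package main

import (
	"encoding/json"
	"fmt"

	tree "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
)

func main() {
	in := &tree.AddRequest_Body{TreeId: "t", ParentId: 42}

	data, err := json.Marshal(in) // parentId appears as "42", not 42
	if err != nil {
		panic(err)
	}

	var out tree.AddRequest_Body
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(data), out.ParentId)
}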
-func (x *AddRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddRequest_Body - f = new(AddRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddResponse_Body struct { - NodeId uint64 `json:"nodeId"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddResponse_Body)(nil) - _ json.Marshaler = (*AddResponse_Body)(nil) - _ json.Unmarshaler = (*AddResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *AddResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt64Size(1, x.NodeId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.NodeId != 0 { - mm.AppendUint64(1, x.NodeId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddResponse_Body") - } - switch fc.FieldNum { - case 1: // NodeId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NodeId") - } - x.NodeId = data - } - } - return nil -} -func (x *AddResponse_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} -func (x *AddResponse_Body) SetNodeId(v uint64) { - x.NodeId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodeId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
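EmitProtobuf skips zero-valued fields and StableSize mirrors that, so an all-default body occupies no wire space at all:

package main

import (
	"fmt"

	tree "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
)

func main() {
	var b tree.AddResponse_Body
	fmt.Println(b.StableSize(), len(b.MarshalProtobuf(nil))) // 0 0: defaults are omitted

	b.SetNodeId(7)
	fmt.Println(b.StableSize()) // 2: one-byte field tag plus one-byte varint
}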
-func (x *AddResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodeId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.NodeId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddResponse struct { - Body *AddResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddResponse)(nil) - _ encoding.ProtoUnmarshaler = (*AddResponse)(nil) - _ json.Marshaler = (*AddResponse)(nil) - _ json.Unmarshaler = (*AddResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *AddResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *AddResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddResponse) GetBody() *AddResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddResponse) SetBody(v *AddResponse_Body) { - x.Body = v -} -func (x *AddResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddResponse_Body - f = new(AddResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddByPathRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - PathAttribute string `json:"pathAttribute"` - Path []string `json:"path"` - Meta []KeyValue `json:"meta"` - BearerToken []byte `json:"bearerToken"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddByPathRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddByPathRequest_Body)(nil) - _ json.Marshaler = (*AddByPathRequest_Body)(nil) - _ json.Unmarshaler = (*AddByPathRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *AddByPathRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.StringSize(3, x.PathAttribute) - size += proto.RepeatedStringSize(4, x.Path) - for i := range x.Meta { - size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i]) - } - size += proto.BytesSize(6, x.BearerToken) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddByPathRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if len(x.PathAttribute) != 0 { - mm.AppendString(3, x.PathAttribute) - } - for j := range x.Path { - mm.AppendString(4, x.Path[j]) - } - for i := range x.Meta { - x.Meta[i].EmitProtobuf(mm.AppendMessage(5)) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(6, x.BearerToken) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddByPathRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // PathAttribute - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "PathAttribute") - } - x.PathAttribute = data - case 4: // Path - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Path") - } - x.Path = append(x.Path, data) - case 5: // Meta - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = append(x.Meta, KeyValue{}) - ff := &x.Meta[len(x.Meta)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 6: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - } - } - return nil -} -func (x *AddByPathRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *AddByPathRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *AddByPathRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *AddByPathRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *AddByPathRequest_Body) GetPathAttribute() string { - if x != nil { - return x.PathAttribute - } - return "" -} -func (x *AddByPathRequest_Body) SetPathAttribute(v string) { - x.PathAttribute = v -} -func (x *AddByPathRequest_Body) GetPath() []string { - if x != nil { - return x.Path - } - return nil -} -func (x *AddByPathRequest_Body) SetPath(v []string) { - x.Path = v -} -func (x *AddByPathRequest_Body) GetMeta() []KeyValue { - if x != nil { - return x.Meta - } - return nil -} -func (x 
*AddByPathRequest_Body) SetMeta(v []KeyValue) { - x.Meta = v -} -func (x *AddByPathRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *AddByPathRequest_Body) SetBearerToken(v []byte) { - x.BearerToken = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddByPathRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"pathAttribute\":" - out.RawString(prefix) - out.String(x.PathAttribute) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"path\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Path { - if i != 0 { - out.RawByte(',') - } - out.String(x.Path[i]) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Meta { - if i != 0 { - out.RawByte(',') - } - x.Meta[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"bearerToken\":" - out.RawString(prefix) - if x.BearerToken != nil { - out.Base64Bytes(x.BearerToken) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *AddByPathRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "pathAttribute": - { - var f string - f = in.String() - x.PathAttribute = f - } - case "path": - { - var f string - var list []string - in.Delim('[') - for !in.IsDelim(']') { - f = in.String() - list = append(list, f) - in.WantComma() - } - x.Path = list - in.Delim(']') - } - case "meta": - { - var f KeyValue - var list []KeyValue - in.Delim('[') - for !in.IsDelim(']') { - f = KeyValue{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Meta = list - in.Delim(']') - } - case "bearerToken": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.BearerToken = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddByPathRequest struct { - Body *AddByPathRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddByPathRequest)(nil) - _ encoding.ProtoUnmarshaler = (*AddByPathRequest)(nil) - _ json.Marshaler = (*AddByPathRequest)(nil) - _ json.Unmarshaler = (*AddByPathRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *AddByPathRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddByPathRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddByPathRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddByPathRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *AddByPathRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddByPathRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddByPathRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddByPathRequest) SetBody(v *AddByPathRequest_Body) { - x.Body = v -} -func (x *AddByPathRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddByPathRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddByPathRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddByPathRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddByPathRequest_Body - f = new(AddByPathRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddByPathResponse_Body struct { - Nodes []uint64 `json:"nodes"` - ParentId uint64 `json:"parentId"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddByPathResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*AddByPathResponse_Body)(nil) - _ json.Marshaler = (*AddByPathResponse_Body)(nil) - _ json.Unmarshaler = (*AddByPathResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *AddByPathResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - var n int - n, _ = proto.RepeatedUInt64Size(1, x.Nodes) - size += n - size += proto.UInt64Size(2, x.ParentId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddByPathResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Nodes) != 0 { - mm.AppendUint64s(1, x.Nodes) - } - if x.ParentId != 0 { - mm.AppendUint64(2, x.ParentId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *AddByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddByPathResponse_Body") - } - switch fc.FieldNum { - case 1: // Nodes - data, ok := fc.UnpackUint64s(nil) - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Nodes") - } - x.Nodes = data - case 2: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = data - } - } - return nil -} -func (x *AddByPathResponse_Body) GetNodes() []uint64 { - if x != nil { - return x.Nodes - } - return nil -} -func (x *AddByPathResponse_Body) SetNodes(v []uint64) { - x.Nodes = v -} -func (x *AddByPathResponse_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} -func (x *AddByPathResponse_Body) SetParentId(v uint64) { - x.ParentId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddByPathResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodes\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Nodes { - if i != 0 { - out.RawByte(',') - } - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10) - out.RawByte('"') - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
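Note the wire-format asymmetry: Nodes here is packed (the proto3 default; EmitProtobuf above uses AppendUint64s), whereas the repeated uint64 fields of GetSubTreeRequest/GetSubTreeResponse were declared [ packed = false ] in the .proto, presumably for wire-level compatibility.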
-func (x *AddByPathResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodes": - { - var f uint64 - var list []uint64 - in.Delim('[') - for !in.IsDelim(']') { - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - list = append(list, f) - in.WantComma() - } - x.Nodes = list - in.Delim(']') - } - case "parentId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ParentId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type AddByPathResponse struct { - Body *AddByPathResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*AddByPathResponse)(nil) - _ encoding.ProtoUnmarshaler = (*AddByPathResponse)(nil) - _ json.Marshaler = (*AddByPathResponse)(nil) - _ json.Unmarshaler = (*AddByPathResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *AddByPathResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *AddByPathResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *AddByPathResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *AddByPathResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *AddByPathResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "AddByPathResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(AddByPathResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *AddByPathResponse) SetBody(v *AddByPathResponse_Body) { - x.Body = v -} -func (x *AddByPathResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *AddByPathResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *AddByPathResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *AddByPathResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *AddByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *AddByPathResponse_Body - f = new(AddByPathResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - NodeId uint64 `json:"nodeId"` - BearerToken []byte `json:"bearerToken"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveRequest_Body)(nil) - _ json.Marshaler = (*RemoveRequest_Body)(nil) - _ json.Unmarshaler = (*RemoveRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
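Because EmitProtobuf skips exactly the same zero values that StableSize counts, the reported size matches the encoded length, and a caller can preallocate the destination buffer. A hedged sketch, assuming only the generated API:

	body := new(RemoveRequest_Body)
	body.SetTreeId("version")
	body.SetNodeId(42)
	buf := make([]byte, 0, body.StableSize())
	buf = body.MarshalProtobuf(buf) // appends into the preallocated capacity
	// len(buf) == body.StableSize()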
-func (x *RemoveRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.UInt64Size(3, x.NodeId) - size += proto.BytesSize(4, x.BearerToken) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.NodeId != 0 { - mm.AppendUint64(3, x.NodeId) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(4, x.BearerToken) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // NodeId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NodeId") - } - x.NodeId = data - case 4: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - } - } - return nil -} -func (x *RemoveRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *RemoveRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *RemoveRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *RemoveRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *RemoveRequest_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} -func (x *RemoveRequest_Body) SetNodeId(v uint64) { - x.NodeId = v -} -func (x *RemoveRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *RemoveRequest_Body) SetBearerToken(v []byte) { - x.BearerToken = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *RemoveRequest_Body) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"containerId\":"
-		out.RawString(prefix)
-		if x.ContainerId != nil {
-			out.Base64Bytes(x.ContainerId)
-		} else {
-			out.String("")
-		}
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"treeId\":"
-		out.RawString(prefix)
-		out.String(x.TreeId)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"nodeId\":"
-		out.RawString(prefix)
-		out.RawByte('"')
-		out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
-		out.RawByte('"')
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"bearerToken\":"
-		out.RawString(prefix)
-		if x.BearerToken != nil {
-			out.Base64Bytes(x.BearerToken)
-		} else {
-			out.String("")
-		}
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "containerId":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.ContainerId = f
-			}
-		case "treeId":
-			{
-				var f string
-				f = in.String()
-				x.TreeId = f
-			}
-		case "nodeId":
-			{
-				var f uint64
-				r := in.JsonNumber()
-				n := r.String()
-				v, err := strconv.ParseUint(n, 10, 64)
-				if err != nil {
-					in.AddError(err)
-					return
-				}
-				pv := uint64(v)
-				f = pv
-				x.NodeId = f
-			}
-		case "bearerToken":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.BearerToken = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type RemoveRequest struct {
-	Body      *RemoveRequest_Body `json:"body"`
-	Signature *Signature          `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*RemoveRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*RemoveRequest)(nil)
-	_ json.Marshaler            = (*RemoveRequest)(nil)
-	_ json.Unmarshaler          = (*RemoveRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the signed data of x in bytes.
-//
-// Structures with the same field values have the same signed data size.
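Only the body is covered by the signature: SignedDataSize, defined next, delegates to the body's StableSize, and ReadSignedData returns the body's protobuf encoding. A typical signing flow might look like the following sketch; the signing step itself is a placeholder:

	req := new(RemoveRequest)
	req.SetBody(new(RemoveRequest_Body))
	buf := make([]byte, 0, req.SignedDataSize())
	data, err := req.ReadSignedData(buf)
	if err != nil {
		// handle the error
	}
	// data is the protobuf encoding of req.GetBody(); sign it and attach
	// the result with req.SetSignature(...).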
-func (x *RemoveRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveRequest) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "RemoveRequest")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(RemoveRequest_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *RemoveRequest) SetBody(v *RemoveRequest_Body) {
-	x.Body = v
-}
-func (x *RemoveRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *RemoveRequest) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveRequest) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveRequest_Body - f = new(RemoveRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveResponse_Body)(nil) - _ json.Marshaler = (*RemoveResponse_Body)(nil) - _ json.Unmarshaler = (*RemoveResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *RemoveResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *RemoveResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *RemoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *RemoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "RemoveResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *RemoveResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *RemoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *RemoveResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type RemoveResponse struct { - Body *RemoveResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*RemoveResponse)(nil) - _ encoding.ProtoUnmarshaler = (*RemoveResponse)(nil) - _ json.Marshaler = (*RemoveResponse)(nil) - _ json.Unmarshaler = (*RemoveResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
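RemoveResponse_Body has no fields, so its own StableSize is 0 and, once a body is attached, the nested field contributes only a one-byte tag plus a one-byte zero length. A small sketch under that assumption:

	resp := new(RemoveResponse)
	resp.SetBody(new(RemoveResponse_Body))
	// An empty nested body should encode to exactly two bytes
	// (field-1 tag + zero length) while no signature is set.
	encoded := resp.MarshalProtobuf(nil)
	_ = encoded // len(encoded) == 2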
-func (x *RemoveResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the signed data of x in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveResponse) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "RemoveResponse")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(RemoveResponse_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *RemoveResponse) SetBody(v *RemoveResponse_Body) {
-	x.Body = v
-}
-func (x *RemoveResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *RemoveResponse) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveResponse) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *RemoveResponse_Body - f = new(RemoveResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type MoveRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - ParentId uint64 `json:"parentId"` - NodeId uint64 `json:"nodeId"` - Meta []KeyValue `json:"meta"` - BearerToken []byte `json:"bearerToken"` -} - -var ( - _ encoding.ProtoMarshaler = (*MoveRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*MoveRequest_Body)(nil) - _ json.Marshaler = (*MoveRequest_Body)(nil) - _ json.Unmarshaler = (*MoveRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *MoveRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.UInt64Size(3, x.ParentId) - size += proto.UInt64Size(4, x.NodeId) - for i := range x.Meta { - size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i]) - } - size += proto.BytesSize(6, x.BearerToken) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *MoveRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.ParentId != 0 { - mm.AppendUint64(3, x.ParentId) - } - if x.NodeId != 0 { - mm.AppendUint64(4, x.NodeId) - } - for i := range x.Meta { - x.Meta[i].EmitProtobuf(mm.AppendMessage(5)) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(6, x.BearerToken) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
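In the decoder below, every occurrence of field 5 appends one entry to Meta, so repeated meta key-values survive a round trip in order, and field numbers outside the switch are consumed and ignored. A minimal round-trip sketch, assuming the KeyValue message generated earlier in this file (Key string, Value []byte):

	in := new(MoveRequest_Body)
	in.SetMeta([]KeyValue{{Key: "FileName", Value: []byte("a.txt")}})
	var out MoveRequest_Body
	if err := out.UnmarshalProtobuf(in.MarshalProtobuf(nil)); err != nil {
		// handle the error
	}
	// len(out.GetMeta()) == 1, in the original order.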
-func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "MoveRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = data - case 4: // NodeId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NodeId") - } - x.NodeId = data - case 5: // Meta - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = append(x.Meta, KeyValue{}) - ff := &x.Meta[len(x.Meta)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 6: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - } - } - return nil -} -func (x *MoveRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *MoveRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *MoveRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *MoveRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *MoveRequest_Body) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} -func (x *MoveRequest_Body) SetParentId(v uint64) { - x.ParentId = v -} -func (x *MoveRequest_Body) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} -func (x *MoveRequest_Body) SetNodeId(v uint64) { - x.NodeId = v -} -func (x *MoveRequest_Body) GetMeta() []KeyValue { - if x != nil { - return x.Meta - } - return nil -} -func (x *MoveRequest_Body) SetMeta(v []KeyValue) { - x.Meta = v -} -func (x *MoveRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *MoveRequest_Body) SetBearerToken(v []byte) { - x.BearerToken = v -} - -// MarshalJSON implements the json.Marshaler interface. 
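The JSON encoders below follow protojson conventions: 64-bit integers are written as quoted decimal strings, byte slices as base64, and every field is emitted even when it holds its zero value. For instance (a sketch, assuming only the generated API):

	b := new(MoveRequest_Body)
	b.SetTreeId("system")
	b.SetParentId(1)
	js, _ := b.MarshalJSON()
	// js: {"containerId":"","treeId":"system","parentId":"1","nodeId":"0",
	//      "meta":[],"bearerToken":""}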
-func (x *MoveRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodeId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Meta { - if i != 0 { - out.RawByte(',') - } - x.Meta[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"bearerToken\":" - out.RawString(prefix) - if x.BearerToken != nil { - out.Base64Bytes(x.BearerToken) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *MoveRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "parentId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ParentId = f - } - case "nodeId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.NodeId = f - } - case "meta": - { - var f KeyValue - var list []KeyValue - in.Delim('[') - for !in.IsDelim(']') { - f = KeyValue{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Meta = list - in.Delim(']') - } - case "bearerToken": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.BearerToken = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type MoveRequest struct { - Body *MoveRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*MoveRequest)(nil) - _ encoding.ProtoUnmarshaler = 
(*MoveRequest)(nil)
-	_ json.Marshaler            = (*MoveRequest)(nil)
-	_ json.Unmarshaler          = (*MoveRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *MoveRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the signed data of x in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *MoveRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *MoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveRequest) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveRequest) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "MoveRequest")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(MoveRequest_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *MoveRequest) GetBody() *MoveRequest_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *MoveRequest) SetBody(v *MoveRequest_Body) {
-	x.Body = v
-}
-func (x *MoveRequest) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *MoveRequest) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *MoveRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *MoveRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *MoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *MoveRequest_Body - f = new(MoveRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type MoveResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*MoveResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*MoveResponse_Body)(nil) - _ json.Marshaler = (*MoveResponse_Body)(nil) - _ json.Unmarshaler = (*MoveResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *MoveResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *MoveResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *MoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *MoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "MoveResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *MoveResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *MoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *MoveResponse_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *MoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type MoveResponse struct {
-	Body      *MoveResponse_Body `json:"body"`
-	Signature *Signature         `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*MoveResponse)(nil)
-	_ encoding.ProtoUnmarshaler = (*MoveResponse)(nil)
-	_ json.Marshaler            = (*MoveResponse)(nil)
-	_ json.Unmarshaler          = (*MoveResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *MoveResponse) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the signed data of x in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *MoveResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *MoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "MoveResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(MoveResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *MoveResponse) GetBody() *MoveResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *MoveResponse) SetBody(v *MoveResponse_Body) { - x.Body = v -} -func (x *MoveResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *MoveResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *MoveResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *MoveResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *MoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *MoveResponse_Body - f = new(MoveResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNodeByPathRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - PathAttribute string `json:"pathAttribute"` - Path []string `json:"path"` - Attributes []string `json:"attributes"` - LatestOnly bool `json:"latestOnly"` - AllAttributes bool `json:"allAttributes"` - BearerToken []byte `json:"bearerToken"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNodeByPathRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest_Body)(nil) - _ json.Marshaler = (*GetNodeByPathRequest_Body)(nil) - _ json.Unmarshaler = (*GetNodeByPathRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
-func (x *GetNodeByPathRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.StringSize(3, x.PathAttribute) - size += proto.RepeatedStringSize(4, x.Path) - size += proto.RepeatedStringSize(5, x.Attributes) - size += proto.BoolSize(6, x.LatestOnly) - size += proto.BoolSize(7, x.AllAttributes) - size += proto.BytesSize(8, x.BearerToken) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNodeByPathRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNodeByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if len(x.PathAttribute) != 0 { - mm.AppendString(3, x.PathAttribute) - } - for j := range x.Path { - mm.AppendString(4, x.Path[j]) - } - for j := range x.Attributes { - mm.AppendString(5, x.Attributes[j]) - } - if x.LatestOnly { - mm.AppendBool(6, x.LatestOnly) - } - if x.AllAttributes { - mm.AppendBool(7, x.AllAttributes) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(8, x.BearerToken) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetNodeByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // PathAttribute - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "PathAttribute") - } - x.PathAttribute = data - case 4: // Path - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Path") - } - x.Path = append(x.Path, data) - case 5: // Attributes - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Attributes") - } - x.Attributes = append(x.Attributes, data) - case 6: // LatestOnly - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "LatestOnly") - } - x.LatestOnly = data - case 7: // AllAttributes - data, ok := fc.Bool() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "AllAttributes") - } - x.AllAttributes = data - case 8: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - } - } - return nil -} -func (x *GetNodeByPathRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *GetNodeByPathRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *GetNodeByPathRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *GetNodeByPathRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *GetNodeByPathRequest_Body) GetPathAttribute() string { - if x != nil { - return x.PathAttribute - } - return "" -} 
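All generated getters check the receiver for nil, so chained access through optional messages never panics; absent fields simply read as zero values. For example (illustrative only):

	var req *GetNodeByPathRequest // nil on purpose
	_ = req.GetBody().GetTreeId() // "": a nil body yields zero values, no panic
	_ = req.GetBody().GetPath()   // nil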
-func (x *GetNodeByPathRequest_Body) SetPathAttribute(v string) { - x.PathAttribute = v -} -func (x *GetNodeByPathRequest_Body) GetPath() []string { - if x != nil { - return x.Path - } - return nil -} -func (x *GetNodeByPathRequest_Body) SetPath(v []string) { - x.Path = v -} -func (x *GetNodeByPathRequest_Body) GetAttributes() []string { - if x != nil { - return x.Attributes - } - return nil -} -func (x *GetNodeByPathRequest_Body) SetAttributes(v []string) { - x.Attributes = v -} -func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool { - if x != nil { - return x.LatestOnly - } - return false -} -func (x *GetNodeByPathRequest_Body) SetLatestOnly(v bool) { - x.LatestOnly = v -} -func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool { - if x != nil { - return x.AllAttributes - } - return false -} -func (x *GetNodeByPathRequest_Body) SetAllAttributes(v bool) { - x.AllAttributes = v -} -func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *GetNodeByPathRequest_Body) SetBearerToken(v []byte) { - x.BearerToken = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNodeByPathRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"pathAttribute\":" - out.RawString(prefix) - out.String(x.PathAttribute) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"path\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Path { - if i != 0 { - out.RawByte(',') - } - out.String(x.Path[i]) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"attributes\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Attributes { - if i != 0 { - out.RawByte(',') - } - out.String(x.Attributes[i]) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"latestOnly\":" - out.RawString(prefix) - out.Bool(x.LatestOnly) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"allAttributes\":" - out.RawString(prefix) - out.Bool(x.AllAttributes) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"bearerToken\":" - out.RawString(prefix) - if x.BearerToken != nil { - out.Base64Bytes(x.BearerToken) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetNodeByPathRequest_Body) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "containerId":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.ContainerId = f
-			}
-		case "treeId":
-			{
-				var f string
-				f = in.String()
-				x.TreeId = f
-			}
-		case "pathAttribute":
-			{
-				var f string
-				f = in.String()
-				x.PathAttribute = f
-			}
-		case "path":
-			{
-				var f string
-				var list []string
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					f = in.String()
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Path = list
-				in.Delim(']')
-			}
-		case "attributes":
-			{
-				var f string
-				var list []string
-				in.Delim('[')
-				for !in.IsDelim(']') {
-					f = in.String()
-					list = append(list, f)
-					in.WantComma()
-				}
-				x.Attributes = list
-				in.Delim(']')
-			}
-		case "latestOnly":
-			{
-				var f bool
-				f = in.Bool()
-				x.LatestOnly = f
-			}
-		case "allAttributes":
-			{
-				var f bool
-				f = in.Bool()
-				x.AllAttributes = f
-			}
-		case "bearerToken":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.BearerToken = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
-
-type GetNodeByPathRequest struct {
-	Body      *GetNodeByPathRequest_Body `json:"body"`
-	Signature *Signature                 `json:"signature"`
-}
-
-var (
-	_ encoding.ProtoMarshaler   = (*GetNodeByPathRequest)(nil)
-	_ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest)(nil)
-	_ json.Marshaler            = (*GetNodeByPathRequest)(nil)
-	_ json.Unmarshaler          = (*GetNodeByPathRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetNodeByPathRequest) StableSize() (size int) {
-	if x == nil {
-		return 0
-	}
-	size += proto.NestedStructureSize(1, x.Body)
-	size += proto.NestedStructureSize(2, x.Signature)
-	return size
-}
-
-// SignedDataSize returns the size of the signed data of x in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetNodeByPathRequest) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// ReadSignedData fills buf with the signed data of x.
-// If the buffer length is less than x.SignedDataSize(), a new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetNodeByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
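MarshalProtobuf below borrows a marshaler from pool.MarshalerPool and returns it when done, so the only steady-state allocation is the destination slice, and even that can be reused across calls. A usage sketch; requests and send are placeholders for the caller's own batch and transport:

	func flush(requests []*GetNodeByPathRequest, send func([]byte)) {
		var dst []byte
		for _, req := range requests {
			dst = req.MarshalProtobuf(dst[:0]) // reuse one backing array
			send(dst)
		}
	}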
-func (x *GetNodeByPathRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetNodeByPathRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetNodeByPathRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetNodeByPathRequest) SetBody(v *GetNodeByPathRequest_Body) { - x.Body = v -} -func (x *GetNodeByPathRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetNodeByPathRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNodeByPathRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetNodeByPathRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetNodeByPathRequest_Body - f = new(GetNodeByPathRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNodeByPathResponse_Info struct { - NodeId uint64 `json:"nodeId"` - Timestamp uint64 `json:"timestamp"` - Meta []KeyValue `json:"meta"` - ParentId uint64 `json:"parentId"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Info)(nil) - _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Info)(nil) - _ json.Marshaler = (*GetNodeByPathResponse_Info)(nil) - _ json.Unmarshaler = (*GetNodeByPathResponse_Info)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNodeByPathResponse_Info) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt64Size(1, x.NodeId) - size += proto.UInt64Size(2, x.Timestamp) - for i := range x.Meta { - size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i]) - } - size += proto.UInt64Size(4, x.ParentId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNodeByPathResponse_Info) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.NodeId != 0 { - mm.AppendUint64(1, x.NodeId) - } - if x.Timestamp != 0 { - mm.AppendUint64(2, x.Timestamp) - } - for i := range x.Meta { - x.Meta[i].EmitProtobuf(mm.AppendMessage(3)) - } - if x.ParentId != 0 { - mm.AppendUint64(4, x.ParentId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Info") - } - switch fc.FieldNum { - case 1: // NodeId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NodeId") - } - x.NodeId = data - case 2: // Timestamp - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Timestamp") - } - x.Timestamp = data - case 3: // Meta - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = append(x.Meta, KeyValue{}) - ff := &x.Meta[len(x.Meta)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 4: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = data - } - } - return nil -} -func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 { - if x != nil { - return x.NodeId - } - return 0 -} -func (x *GetNodeByPathResponse_Info) SetNodeId(v uint64) { - x.NodeId = v -} -func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 { - if x != nil { - return x.Timestamp - } - return 0 -} -func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) { - x.Timestamp = v -} -func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue { - if x != nil { - return x.Meta - } - return nil -} -func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) { - x.Meta = v -} -func (x *GetNodeByPathResponse_Info) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} -func (x *GetNodeByPathResponse_Info) SetParentId(v uint64) { - x.ParentId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNodeByPathResponse_Info) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodeId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"timestamp\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Meta { - if i != 0 { - out.RawByte(',') - } - x.Meta[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentId\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetNodeByPathResponse_Info) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodeId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.NodeId = f - } - case "timestamp": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.Timestamp = f - } - case "meta": - { - var f KeyValue - var list []KeyValue - in.Delim('[') - for !in.IsDelim(']') { - f = KeyValue{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Meta = list - in.Delim(']') - } - case "parentId": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ParentId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNodeByPathResponse_Body struct { - Nodes []GetNodeByPathResponse_Info `json:"nodes"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Body)(nil) - _ json.Marshaler = (*GetNodeByPathResponse_Body)(nil) - _ json.Unmarshaler = (*GetNodeByPathResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNodeByPathResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.Nodes { - size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetNodeByPathResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for i := range x.Nodes { - x.Nodes[i].EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
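-//
-// Nodes are appended in wire order during decoding; a drain sketch (names
-// are illustrative only):
-//
-//	var body GetNodeByPathResponse_Body
-//	if err := body.UnmarshalProtobuf(data); err == nil {
-//		for _, n := range body.GetNodes() {
-//			_ = n.GetNodeId()
-//		}
-//	}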
-func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Body") - } - switch fc.FieldNum { - case 1: // Nodes - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Nodes") - } - x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{}) - ff := &x.Nodes[len(x.Nodes)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info { - if x != nil { - return x.Nodes - } - return nil -} -func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) { - x.Nodes = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetNodeByPathResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodes\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Nodes { - if i != 0 { - out.RawByte(',') - } - x.Nodes[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetNodeByPathResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodes": - { - var f GetNodeByPathResponse_Info - var list []GetNodeByPathResponse_Info - in.Delim('[') - for !in.IsDelim(']') { - f = GetNodeByPathResponse_Info{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Nodes = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetNodeByPathResponse struct { - Body *GetNodeByPathResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetNodeByPathResponse)(nil) - _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse)(nil) - _ json.Marshaler = (*GetNodeByPathResponse)(nil) - _ json.Unmarshaler = (*GetNodeByPathResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetNodeByPathResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. 
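-//
-// A typical signing flow over these bytes, given a prepared
-// resp *GetNodeByPathResponse (hypotheticalSign is a stand-in for a real
-// signer, not part of this package):
-//
-//	data, _ := resp.ReadSignedData(nil)
-//	resp.SetSignature(hypotheticalSign(data))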
-func (x *GetNodeByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// SignedDataSize returns the size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetNodeByPathResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(GetNodeByPathResponse_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *GetNodeByPathResponse) SetBody(v *GetNodeByPathResponse_Body) {
-	x.Body = v
-}
-func (x *GetNodeByPathResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *GetNodeByPathResponse) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathResponse) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetNodeByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetNodeByPathResponse_Body - f = new(GetNodeByPathResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetSubTreeRequest_Body_Order_Direction int32 - -const ( - GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0 - GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1 -) - -var ( - GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{ - 0: "None", - 1: "Asc", - } - GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{ - "None": 0, - "Asc": 1, - } -) - -func (x GetSubTreeRequest_Body_Order_Direction) String() string { - if v, ok := GetSubTreeRequest_Body_Order_Direction_name[int32(x)]; ok { - return v - } - return strconv.FormatInt(int64(x), 10) -} -func (x *GetSubTreeRequest_Body_Order_Direction) FromString(s string) bool { - if v, ok := GetSubTreeRequest_Body_Order_Direction_value[s]; ok { - *x = GetSubTreeRequest_Body_Order_Direction(v) - return true - } - return false -} - -type GetSubTreeRequest_Body_Order struct { - Direction GetSubTreeRequest_Body_Order_Direction `json:"direction"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body_Order)(nil) - _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body_Order)(nil) - _ json.Marshaler = (*GetSubTreeRequest_Body_Order)(nil) - _ json.Unmarshaler = (*GetSubTreeRequest_Body_Order)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetSubTreeRequest_Body_Order) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.EnumSize(1, int32(x.Direction)) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetSubTreeRequest_Body_Order) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetSubTreeRequest_Body_Order) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if int32(x.Direction) != 0 { - mm.AppendInt32(1, int32(x.Direction)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
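-//
-// The Direction value round-trips through the String/FromString helpers
-// defined above; FromString reports false for unknown names:
-//
-//	var d GetSubTreeRequest_Body_Order_Direction
-//	if d.FromString("Asc") {
-//		_ = d.String() // "Asc"
-//	}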
-func (x *GetSubTreeRequest_Body_Order) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body_Order") - } - switch fc.FieldNum { - case 1: // Direction - data, ok := fc.Int32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Direction") - } - x.Direction = GetSubTreeRequest_Body_Order_Direction(data) - } - } - return nil -} -func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction { - if x != nil { - return x.Direction - } - return 0 -} -func (x *GetSubTreeRequest_Body_Order) SetDirection(v GetSubTreeRequest_Body_Order_Direction) { - x.Direction = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetSubTreeRequest_Body_Order) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"direction\":" - out.RawString(prefix) - v := int32(x.Direction) - if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok { - out.String(vv) - } else { - out.Int32(v) - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetSubTreeRequest_Body_Order) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetSubTreeRequest_Body_Order) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "direction": - { - var f GetSubTreeRequest_Body_Order_Direction - var parsedValue GetSubTreeRequest_Body_Order_Direction - switch v := in.Interface().(type) { - case string: - if vv, ok := GetSubTreeRequest_Body_Order_Direction_value[v]; ok { - parsedValue = GetSubTreeRequest_Body_Order_Direction(vv) - break - } - vv, err := strconv.ParseInt(v, 10, 32) - if err != nil { - in.AddError(err) - return - } - parsedValue = GetSubTreeRequest_Body_Order_Direction(vv) - case float64: - parsedValue = GetSubTreeRequest_Body_Order_Direction(v) - } - f = parsedValue - x.Direction = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetSubTreeRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - RootId []uint64 `json:"rootId"` - Depth uint32 `json:"depth"` - BearerToken []byte `json:"bearerToken"` - OrderBy *GetSubTreeRequest_Body_Order `json:"orderBy"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body)(nil) - _ json.Marshaler = (*GetSubTreeRequest_Body)(nil) - _ json.Unmarshaler = (*GetSubTreeRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. 
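-//
-// The stable size can be used to presize a destination buffer before
-// marshaling (a sketch; MarshalProtobuf appends to the buffer it is given):
-//
-//	buf := make([]byte, 0, body.StableSize())
-//	buf = body.MarshalProtobuf(buf)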
-func (x *GetSubTreeRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - for i := range x.RootId { - size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.RootId[i])) - } - size += proto.UInt32Size(4, x.Depth) - size += proto.BytesSize(5, x.BearerToken) - size += proto.NestedStructureSize(6, x.OrderBy) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetSubTreeRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - for j := range x.RootId { - mm.AppendUint64(3, x.RootId[j]) - } - if x.Depth != 0 { - mm.AppendUint32(4, x.Depth) - } - if len(x.BearerToken) != 0 { - mm.AppendBytes(5, x.BearerToken) - } - if x.OrderBy != nil { - x.OrderBy.EmitProtobuf(mm.AppendMessage(6)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetSubTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // RootId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "RootId") - } - x.RootId = append(x.RootId, data) - case 4: // Depth - data, ok := fc.Uint32() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Depth") - } - x.Depth = data - case 5: // BearerToken - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "BearerToken") - } - x.BearerToken = data - case 6: // OrderBy - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "OrderBy") - } - x.OrderBy = new(GetSubTreeRequest_Body_Order) - if err := x.OrderBy.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetSubTreeRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *GetSubTreeRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *GetSubTreeRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *GetSubTreeRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *GetSubTreeRequest_Body) GetRootId() []uint64 { - if x != nil { - return x.RootId - } - return nil -} -func (x *GetSubTreeRequest_Body) SetRootId(v []uint64) { - x.RootId = v -} -func (x *GetSubTreeRequest_Body) GetDepth() uint32 { - if x != nil { - return x.Depth - } - return 0 -} -func (x *GetSubTreeRequest_Body) SetDepth(v uint32) { - x.Depth = v -} -func (x *GetSubTreeRequest_Body) GetBearerToken() []byte { - if x != nil { - return x.BearerToken - } - return nil -} -func (x *GetSubTreeRequest_Body) 
SetBearerToken(v []byte) { - x.BearerToken = v -} -func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order { - if x != nil { - return x.OrderBy - } - return nil -} -func (x *GetSubTreeRequest_Body) SetOrderBy(v *GetSubTreeRequest_Body_Order) { - x.OrderBy = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetSubTreeRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"rootId\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.RootId { - if i != 0 { - out.RawByte(',') - } - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10) - out.RawByte('"') - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"depth\":" - out.RawString(prefix) - out.Uint32(x.Depth) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"bearerToken\":" - out.RawString(prefix) - if x.BearerToken != nil { - out.Base64Bytes(x.BearerToken) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"orderBy\":" - out.RawString(prefix) - x.OrderBy.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
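-//
-// The accepted JSON mirrors MarshalEasyJSON above: byte slices are base64
-// strings, rootId elements are quoted decimals, while depth is a bare
-// number. An illustrative document ("<base64>" marks a placeholder value):
-//
-//	{"containerId":"<base64>","treeId":"example","rootId":["0"],"depth":2}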
-func (x *GetSubTreeRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "rootId": - { - var f uint64 - var list []uint64 - in.Delim('[') - for !in.IsDelim(']') { - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - list = append(list, f) - in.WantComma() - } - x.RootId = list - in.Delim(']') - } - case "depth": - { - var f uint32 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 32) - if err != nil { - in.AddError(err) - return - } - pv := uint32(v) - f = pv - x.Depth = f - } - case "bearerToken": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.BearerToken = f - } - case "orderBy": - { - var f *GetSubTreeRequest_Body_Order - f = new(GetSubTreeRequest_Body_Order) - f.UnmarshalEasyJSON(in) - x.OrderBy = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetSubTreeRequest struct { - Body *GetSubTreeRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetSubTreeRequest)(nil) - _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest)(nil) - _ json.Marshaler = (*GetSubTreeRequest)(nil) - _ json.Unmarshaler = (*GetSubTreeRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetSubTreeRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetSubTreeRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetSubTreeRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
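-//
-// The marshaler is taken from a shared pool and returned on exit, so calls
-// do not contend on allocations; passing nil for dst allocates a fresh
-// buffer, while an existing buffer is appended to:
-//
-//	wire := req.MarshalProtobuf(nil)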
-func (x *GetSubTreeRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetSubTreeRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetSubTreeRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetSubTreeRequest) SetBody(v *GetSubTreeRequest_Body) { - x.Body = v -} -func (x *GetSubTreeRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetSubTreeRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetSubTreeRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetSubTreeRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetSubTreeRequest_Body - f = new(GetSubTreeRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetSubTreeResponse_Body struct { - NodeId []uint64 `json:"nodeId"` - ParentId []uint64 `json:"parentId"` - Timestamp []uint64 `json:"timestamp"` - Meta []KeyValue `json:"meta"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetSubTreeResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse_Body)(nil) - _ json.Marshaler = (*GetSubTreeResponse_Body)(nil) - _ json.Unmarshaler = (*GetSubTreeResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetSubTreeResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - for i := range x.NodeId { - size += protowire.SizeGroup(protowire.Number(1), protowire.SizeVarint(x.NodeId[i])) - } - for i := range x.ParentId { - size += protowire.SizeGroup(protowire.Number(2), protowire.SizeVarint(x.ParentId[i])) - } - for i := range x.Timestamp { - size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i])) - } - for i := range x.Meta { - size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i]) - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetSubTreeResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.NodeId { - mm.AppendUint64(1, x.NodeId[j]) - } - for j := range x.ParentId { - mm.AppendUint64(2, x.ParentId[j]) - } - for j := range x.Timestamp { - mm.AppendUint64(3, x.Timestamp[j]) - } - for i := range x.Meta { - x.Meta[i].EmitProtobuf(mm.AppendMessage(4)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
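-//
-// Each repeated field is appended independently in wire order; how the
-// nodeId/parentId/timestamp slices relate to each other is defined by the
-// tree service protocol, not by this decoder. A decode sketch:
-//
-//	var body GetSubTreeResponse_Body
-//	if err := body.UnmarshalProtobuf(data); err == nil {
-//		_ = body.GetNodeId()
-//	}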
-func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse_Body") - } - switch fc.FieldNum { - case 1: // NodeId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "NodeId") - } - x.NodeId = append(x.NodeId, data) - case 2: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = append(x.ParentId, data) - case 3: // Timestamp - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Timestamp") - } - x.Timestamp = append(x.Timestamp, data) - case 4: // Meta - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = append(x.Meta, KeyValue{}) - ff := &x.Meta[len(x.Meta)-1] - if err := ff.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 { - if x != nil { - return x.NodeId - } - return nil -} -func (x *GetSubTreeResponse_Body) SetNodeId(v []uint64) { - x.NodeId = v -} -func (x *GetSubTreeResponse_Body) GetParentId() []uint64 { - if x != nil { - return x.ParentId - } - return nil -} -func (x *GetSubTreeResponse_Body) SetParentId(v []uint64) { - x.ParentId = v -} -func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 { - if x != nil { - return x.Timestamp - } - return nil -} -func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) { - x.Timestamp = v -} -func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue { - if x != nil { - return x.Meta - } - return nil -} -func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) { - x.Meta = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *GetSubTreeResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"nodeId\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.NodeId { - if i != 0 { - out.RawByte(',') - } - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10) - out.RawByte('"') - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentId\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.ParentId { - if i != 0 { - out.RawByte(',') - } - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10) - out.RawByte('"') - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"timestamp\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Timestamp { - if i != 0 { - out.RawByte(',') - } - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10) - out.RawByte('"') - } - out.RawByte(']') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Meta { - if i != 0 { - out.RawByte(',') - } - x.Meta[i].MarshalEasyJSON(out) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetSubTreeResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "nodeId": - { - var f uint64 - var list []uint64 - in.Delim('[') - for !in.IsDelim(']') { - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - list = append(list, f) - in.WantComma() - } - x.NodeId = list - in.Delim(']') - } - case "parentId": - { - var f uint64 - var list []uint64 - in.Delim('[') - for !in.IsDelim(']') { - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - list = append(list, f) - in.WantComma() - } - x.ParentId = list - in.Delim(']') - } - case "timestamp": - { - var f uint64 - var list []uint64 - in.Delim('[') - for !in.IsDelim(']') { - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - list = append(list, f) - in.WantComma() - } - x.Timestamp = list - in.Delim(']') - } - case "meta": - { - var f KeyValue - var list []KeyValue - in.Delim('[') - for !in.IsDelim(']') { - f = KeyValue{} - f.UnmarshalEasyJSON(in) - list = append(list, f) - in.WantComma() - } - x.Meta = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if 
isTopLevel { - in.Consumed() - } -} - -type GetSubTreeResponse struct { - Body *GetSubTreeResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetSubTreeResponse)(nil) - _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse)(nil) - _ json.Marshaler = (*GetSubTreeResponse)(nil) - _ json.Unmarshaler = (*GetSubTreeResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetSubTreeResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetSubTreeResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetSubTreeResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetSubTreeResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetSubTreeResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetSubTreeResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetSubTreeResponse) SetBody(v *GetSubTreeResponse_Body) { - x.Body = v -} -func (x *GetSubTreeResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetSubTreeResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. 
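-//
-// Because both json.Marshaler and json.Unmarshaler are implemented, the
-// message round-trips through encoding/json (a sketch):
-//
-//	resp := new(GetSubTreeResponse)
-//	raw, _ := json.Marshal(resp)
-//	var back GetSubTreeResponse
-//	_ = json.Unmarshal(raw, &back)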
-func (x *GetSubTreeResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetSubTreeResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetSubTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetSubTreeResponse_Body - f = new(GetSubTreeResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TreeListRequest_Body struct { - ContainerId []byte `json:"containerId"` -} - -var ( - _ encoding.ProtoMarshaler = (*TreeListRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*TreeListRequest_Body)(nil) - _ json.Marshaler = (*TreeListRequest_Body)(nil) - _ json.Unmarshaler = (*TreeListRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TreeListRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *TreeListRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TreeListRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *TreeListRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TreeListRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - } - } - return nil -} -func (x *TreeListRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *TreeListRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *TreeListRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TreeListRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TreeListRequest struct { - Body *TreeListRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*TreeListRequest)(nil) - _ encoding.ProtoUnmarshaler = (*TreeListRequest)(nil) - _ json.Marshaler = (*TreeListRequest)(nil) - _ json.Unmarshaler = (*TreeListRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TreeListRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *TreeListRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *TreeListRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *TreeListRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
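-//
-// Unknown field numbers are read by NextField and then silently skipped by
-// the switch, so decoding tolerates fields added by newer peers:
-//
-//	var req TreeListRequest
-//	err := req.UnmarshalProtobuf(wire)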
-func (x *TreeListRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TreeListRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(TreeListRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *TreeListRequest) GetBody() *TreeListRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *TreeListRequest) SetBody(v *TreeListRequest_Body) { - x.Body = v -} -func (x *TreeListRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *TreeListRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *TreeListRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TreeListRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TreeListRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *TreeListRequest_Body - f = new(TreeListRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TreeListResponse_Body struct { - Ids []string `json:"ids"` -} - -var ( - _ encoding.ProtoMarshaler = (*TreeListResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*TreeListResponse_Body)(nil) - _ json.Marshaler = (*TreeListResponse_Body)(nil) - _ json.Unmarshaler = (*TreeListResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TreeListResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedStringSize(1, x.Ids) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
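-//
-// Ids marshal as repeated string field 1; an encode sketch with
-// illustrative tree names:
-//
-//	body := new(TreeListResponse_Body)
-//	body.SetIds([]string{"tree-a", "tree-b"})
-//	wire := body.MarshalProtobuf(nil)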
-func (x *TreeListResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *TreeListResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Ids { - mm.AppendString(1, x.Ids[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *TreeListResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "TreeListResponse_Body") - } - switch fc.FieldNum { - case 1: // Ids - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Ids") - } - x.Ids = append(x.Ids, data) - } - } - return nil -} -func (x *TreeListResponse_Body) GetIds() []string { - if x != nil { - return x.Ids - } - return nil -} -func (x *TreeListResponse_Body) SetIds(v []string) { - x.Ids = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *TreeListResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"ids\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Ids { - if i != 0 { - out.RawByte(',') - } - out.String(x.Ids[i]) - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *TreeListResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TreeListResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "ids": - { - var f string - var list []string - in.Delim('[') - for !in.IsDelim(']') { - f = in.String() - list = append(list, f) - in.WantComma() - } - x.Ids = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type TreeListResponse struct { - Body *TreeListResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*TreeListResponse)(nil) - _ encoding.ProtoUnmarshaler = (*TreeListResponse)(nil) - _ json.Marshaler = (*TreeListResponse)(nil) - _ json.Unmarshaler = (*TreeListResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *TreeListResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. 
-//
-// Structures with the same field values have the same signed data.
-func (x *TreeListResponse) ReadSignedData(buf []byte) ([]byte, error) {
-	return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// SignedDataSize returns the size of the response signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *TreeListResponse) SignedDataSize() int {
-	return x.GetBody().StableSize()
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TreeListResponse) MarshalProtobuf(dst []byte) []byte {
-	m := pool.MarshalerPool.Get()
-	defer pool.MarshalerPool.Put(m)
-	x.EmitProtobuf(m.MessageMarshaler())
-	dst = m.Marshal(dst)
-	return dst
-}
-
-func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-	if x == nil {
-		return
-	}
-	if x.Body != nil {
-		x.Body.EmitProtobuf(mm.AppendMessage(1))
-	}
-	if x.Signature != nil {
-		x.Signature.EmitProtobuf(mm.AppendMessage(2))
-	}
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TreeListResponse) UnmarshalProtobuf(src []byte) (err error) {
-	var fc easyproto.FieldContext
-	for len(src) > 0 {
-		src, err = fc.NextField(src)
-		if err != nil {
-			return fmt.Errorf("cannot read next field in %s", "TreeListResponse")
-		}
-		switch fc.FieldNum {
-		case 1: // Body
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Body")
-			}
-			x.Body = new(TreeListResponse_Body)
-			if err := x.Body.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		case 2: // Signature
-			data, ok := fc.MessageData()
-			if !ok {
-				return fmt.Errorf("cannot unmarshal field %s", "Signature")
-			}
-			x.Signature = new(Signature)
-			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
-				return fmt.Errorf("unmarshal: %w", err)
-			}
-		}
-	}
-	return nil
-}
-func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
-	if x != nil {
-		return x.Body
-	}
-	return nil
-}
-func (x *TreeListResponse) SetBody(v *TreeListResponse_Body) {
-	x.Body = v
-}
-func (x *TreeListResponse) GetSignature() *Signature {
-	if x != nil {
-		return x.Signature
-	}
-	return nil
-}
-func (x *TreeListResponse) SetSignature(v *Signature) {
-	x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TreeListResponse) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"body\":"
-		out.RawString(prefix)
-		x.Body.MarshalEasyJSON(out)
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		x.Signature.MarshalEasyJSON(out)
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TreeListResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *TreeListResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *TreeListResponse_Body - f = new(TreeListResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ApplyRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - Operation *LogMove `json:"operation"` -} - -var ( - _ encoding.ProtoMarshaler = (*ApplyRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ApplyRequest_Body)(nil) - _ json.Marshaler = (*ApplyRequest_Body)(nil) - _ json.Unmarshaler = (*ApplyRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ApplyRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.NestedStructureSize(3, x.Operation) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ApplyRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.Operation != nil { - x.Operation.EmitProtobuf(mm.AppendMessage(3)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
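-//
-// A decode-then-read sketch (names are illustrative only):
-//
-//	var body ApplyRequest_Body
-//	if err := body.UnmarshalProtobuf(wire); err == nil {
-//		_ = body.GetTreeId()
-//		_ = body.GetOperation()
-//	}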
-func (x *ApplyRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ApplyRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // Operation - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Operation") - } - x.Operation = new(LogMove) - if err := x.Operation.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ApplyRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *ApplyRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *ApplyRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *ApplyRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *ApplyRequest_Body) GetOperation() *LogMove { - if x != nil { - return x.Operation - } - return nil -} -func (x *ApplyRequest_Body) SetOperation(v *LogMove) { - x.Operation = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ApplyRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"operation\":" - out.RawString(prefix) - x.Operation.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ApplyRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "operation": - { - var f *LogMove - f = new(LogMove) - f.UnmarshalEasyJSON(in) - x.Operation = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ApplyRequest struct { - Body *ApplyRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ApplyRequest)(nil) - _ encoding.ProtoUnmarshaler = (*ApplyRequest)(nil) - _ json.Marshaler = (*ApplyRequest)(nil) - _ json.Unmarshaler = (*ApplyRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ApplyRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ApplyRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ApplyRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ApplyRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *ApplyRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ApplyRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ApplyRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ApplyRequest) GetBody() *ApplyRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ApplyRequest) SetBody(v *ApplyRequest_Body) { - x.Body = v -} -func (x *ApplyRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ApplyRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ApplyRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ApplyRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ApplyRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ApplyRequest_Body - f = new(ApplyRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ApplyResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*ApplyResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ApplyResponse_Body)(nil) - _ json.Marshaler = (*ApplyResponse_Body)(nil) - _ json.Unmarshaler = (*ApplyResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ApplyResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *ApplyResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ApplyResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ApplyResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ApplyResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ApplyResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ApplyResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ApplyResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ApplyResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ApplyResponse struct { - Body *ApplyResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ApplyResponse)(nil) - _ encoding.ProtoUnmarshaler = (*ApplyResponse)(nil) - _ json.Marshaler = (*ApplyResponse)(nil) - _ json.Unmarshaler = (*ApplyResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ApplyResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ApplyResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ApplyResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *ApplyResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ApplyResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ApplyResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ApplyResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ApplyResponse) GetBody() *ApplyResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ApplyResponse) SetBody(v *ApplyResponse_Body) { - x.Body = v -} -func (x *ApplyResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ApplyResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ApplyResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ApplyResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ApplyResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ApplyResponse_Body - f = new(ApplyResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetOpLogRequest_Body struct { - ContainerId []byte `json:"containerId"` - TreeId string `json:"treeId"` - Height uint64 `json:"height"` - Count uint64 `json:"count"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetOpLogRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetOpLogRequest_Body)(nil) - _ json.Marshaler = (*GetOpLogRequest_Body)(nil) - _ json.Unmarshaler = (*GetOpLogRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetOpLogRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.ContainerId) - size += proto.StringSize(2, x.TreeId) - size += proto.UInt64Size(3, x.Height) - size += proto.UInt64Size(4, x.Count) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetOpLogRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetOpLogRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ContainerId) != 0 { - mm.AppendBytes(1, x.ContainerId) - } - if len(x.TreeId) != 0 { - mm.AppendString(2, x.TreeId) - } - if x.Height != 0 { - mm.AppendUint64(3, x.Height) - } - if x.Count != 0 { - mm.AppendUint64(4, x.Count) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetOpLogRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest_Body") - } - switch fc.FieldNum { - case 1: // ContainerId - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - case 2: // TreeId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "TreeId") - } - x.TreeId = data - case 3: // Height - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Height") - } - x.Height = data - case 4: // Count - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Count") - } - x.Count = data - } - } - return nil -} -func (x *GetOpLogRequest_Body) GetContainerId() []byte { - if x != nil { - return x.ContainerId - } - return nil -} -func (x *GetOpLogRequest_Body) SetContainerId(v []byte) { - x.ContainerId = v -} -func (x *GetOpLogRequest_Body) GetTreeId() string { - if x != nil { - return x.TreeId - } - return "" -} -func (x *GetOpLogRequest_Body) SetTreeId(v string) { - x.TreeId = v -} -func (x *GetOpLogRequest_Body) GetHeight() uint64 { - if x != nil { - return x.Height - } - return 0 -} -func (x *GetOpLogRequest_Body) SetHeight(v uint64) { - x.Height = v -} -func (x *GetOpLogRequest_Body) GetCount() uint64 { - if x != nil { - return x.Count - } - return 0 -} -func (x *GetOpLogRequest_Body) SetCount(v uint64) { - x.Count = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetOpLogRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - if x.ContainerId != nil { - out.Base64Bytes(x.ContainerId) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"treeId\":" - out.RawString(prefix) - out.String(x.TreeId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"height\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"count\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetOpLogRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "containerId": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.ContainerId = f - } - case "treeId": - { - var f string - f = in.String() - x.TreeId = f - } - case "height": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.Height = f - } - case "count": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.Count = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetOpLogRequest struct { - Body *GetOpLogRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetOpLogRequest)(nil) - _ encoding.ProtoUnmarshaler = (*GetOpLogRequest)(nil) - _ json.Marshaler = (*GetOpLogRequest)(nil) - _ json.Unmarshaler = (*GetOpLogRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetOpLogRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *GetOpLogRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetOpLogRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetOpLogRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *GetOpLogRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetOpLogRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetOpLogRequest) SetBody(v *GetOpLogRequest_Body) { - x.Body = v -} -func (x *GetOpLogRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetOpLogRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetOpLogRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetOpLogRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetOpLogRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetOpLogRequest_Body - f = new(GetOpLogRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetOpLogResponse_Body struct { - Operation *LogMove `json:"operation"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetOpLogResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*GetOpLogResponse_Body)(nil) - _ json.Marshaler = (*GetOpLogResponse_Body)(nil) - _ json.Unmarshaler = (*GetOpLogResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetOpLogResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Operation) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *GetOpLogResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Operation != nil { - x.Operation.EmitProtobuf(mm.AppendMessage(1)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetOpLogResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse_Body") - } - switch fc.FieldNum { - case 1: // Operation - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Operation") - } - x.Operation = new(LogMove) - if err := x.Operation.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetOpLogResponse_Body) GetOperation() *LogMove { - if x != nil { - return x.Operation - } - return nil -} -func (x *GetOpLogResponse_Body) SetOperation(v *LogMove) { - x.Operation = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetOpLogResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"operation\":" - out.RawString(prefix) - x.Operation.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *GetOpLogResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetOpLogResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "operation": - { - var f *LogMove - f = new(LogMove) - f.UnmarshalEasyJSON(in) - x.Operation = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type GetOpLogResponse struct { - Body *GetOpLogResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*GetOpLogResponse)(nil) - _ encoding.ProtoUnmarshaler = (*GetOpLogResponse)(nil) - _ json.Marshaler = (*GetOpLogResponse)(nil) - _ json.Unmarshaler = (*GetOpLogResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *GetOpLogResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. 
-// -// Structures with the same field values have the same signed data. -func (x *GetOpLogResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *GetOpLogResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *GetOpLogResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *GetOpLogResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(GetOpLogResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *GetOpLogResponse) SetBody(v *GetOpLogResponse_Body) { - x.Body = v -} -func (x *GetOpLogResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *GetOpLogResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *GetOpLogResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *GetOpLogResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *GetOpLogResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *GetOpLogResponse_Body - f = new(GetOpLogResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthcheckResponse_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*HealthcheckResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthcheckResponse_Body)(nil) - _ json.Marshaler = (*HealthcheckResponse_Body)(nil) - _ json.Unmarshaler = (*HealthcheckResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthcheckResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthcheckResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthcheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthcheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthcheckResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthcheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *HealthcheckResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthcheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthcheckResponse struct { - Body *HealthcheckResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthcheckResponse)(nil) - _ encoding.ProtoUnmarshaler = (*HealthcheckResponse)(nil) - _ json.Marshaler = (*HealthcheckResponse)(nil) - _ json.Unmarshaler = (*HealthcheckResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthcheckResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthcheckResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthcheckResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *HealthcheckResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *HealthcheckResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthcheckResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthcheckResponse) SetBody(v *HealthcheckResponse_Body) { - x.Body = v -} -func (x *HealthcheckResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthcheckResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthcheckResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthcheckResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthcheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthcheckResponse_Body - f = new(HealthcheckResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthcheckRequest_Body struct { -} - -var ( - _ encoding.ProtoMarshaler = (*HealthcheckRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*HealthcheckRequest_Body)(nil) - _ json.Marshaler = (*HealthcheckRequest_Body)(nil) - _ json.Unmarshaler = (*HealthcheckRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthcheckRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *HealthcheckRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthcheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthcheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest_Body") - } - switch fc.FieldNum { - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthcheckRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthcheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - out.RawByte('{') - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *HealthcheckRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthcheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type HealthcheckRequest struct { - Body *HealthcheckRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*HealthcheckRequest)(nil) - _ encoding.ProtoUnmarshaler = (*HealthcheckRequest)(nil) - _ json.Marshaler = (*HealthcheckRequest)(nil) - _ json.Unmarshaler = (*HealthcheckRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *HealthcheckRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *HealthcheckRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *HealthcheckRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *HealthcheckRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *HealthcheckRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(HealthcheckRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *HealthcheckRequest) SetBody(v *HealthcheckRequest_Body) { - x.Body = v -} -func (x *HealthcheckRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *HealthcheckRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *HealthcheckRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *HealthcheckRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *HealthcheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *HealthcheckRequest_Body - f = new(HealthcheckRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go deleted file mode 100644 index 63f96e11a..000000000 --- a/pkg/services/tree/service_grpc.pb.go +++ /dev/null @@ -1,520 +0,0 @@ -//* -// Service for working with CRDT tree. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.25.0 -// source: pkg/services/tree/service.proto - -package tree - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - TreeService_Add_FullMethodName = "/tree.TreeService/Add" - TreeService_AddByPath_FullMethodName = "/tree.TreeService/AddByPath" - TreeService_Remove_FullMethodName = "/tree.TreeService/Remove" - TreeService_Move_FullMethodName = "/tree.TreeService/Move" - TreeService_GetNodeByPath_FullMethodName = "/tree.TreeService/GetNodeByPath" - TreeService_GetSubTree_FullMethodName = "/tree.TreeService/GetSubTree" - TreeService_TreeList_FullMethodName = "/tree.TreeService/TreeList" - TreeService_Apply_FullMethodName = "/tree.TreeService/Apply" - TreeService_GetOpLog_FullMethodName = "/tree.TreeService/GetOpLog" - TreeService_Healthcheck_FullMethodName = "/tree.TreeService/Healthcheck" -) - -// TreeServiceClient is the client API for TreeService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type TreeServiceClient interface { - // Add adds new node to the tree. Invoked by a client. - Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) - // AddByPath adds new node to the tree by path. Invoked by a client. - AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) - // Remove removes node from the tree. Invoked by a client. - Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) - // Move moves node from one parent to another. Invoked by a client. - Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) - // GetNodeByPath returns list of IDs corresponding to a specific filepath. - GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) - // GetSubTree returns tree corresponding to a specific node. 
- GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) - // TreeList return list of the existing trees in the container. - TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) - // Apply pushes log operation from another node to the current. - // The request must be signed by a container node. - Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) - // GetOpLog returns a stream of logged operations starting from some height. - GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) - // Healthcheck is a dummy rpc to check service availability - Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) -} - -type treeServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewTreeServiceClient(cc grpc.ClientConnInterface) TreeServiceClient { - return &treeServiceClient{cc} -} - -func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) { - out := new(AddResponse) - err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) { - out := new(AddByPathResponse) - err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) { - out := new(RemoveResponse) - err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) { - out := new(MoveResponse) - err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) { - out := new(GetNodeByPathResponse) - err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) { - stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, opts...) 
- if err != nil { - return nil, err - } - x := &treeServiceGetSubTreeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TreeService_GetSubTreeClient interface { - Recv() (*GetSubTreeResponse, error) - grpc.ClientStream -} - -type treeServiceGetSubTreeClient struct { - grpc.ClientStream -} - -func (x *treeServiceGetSubTreeClient) Recv() (*GetSubTreeResponse, error) { - m := new(GetSubTreeResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) { - out := new(TreeListResponse) - err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { - out := new(ApplyResponse) - err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *treeServiceClient) GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) { - stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &treeServiceGetOpLogClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TreeService_GetOpLogClient interface { - Recv() (*GetOpLogResponse, error) - grpc.ClientStream -} - -type treeServiceGetOpLogClient struct { - grpc.ClientStream -} - -func (x *treeServiceGetOpLogClient) Recv() (*GetOpLogResponse, error) { - m := new(GetOpLogResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *treeServiceClient) Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) { - out := new(HealthcheckResponse) - err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TreeServiceServer is the server API for TreeService service. -// All implementations should embed UnimplementedTreeServiceServer -// for forward compatibility -type TreeServiceServer interface { - // Add adds new node to the tree. Invoked by a client. - Add(context.Context, *AddRequest) (*AddResponse, error) - // AddByPath adds new node to the tree by path. Invoked by a client. - AddByPath(context.Context, *AddByPathRequest) (*AddByPathResponse, error) - // Remove removes node from the tree. Invoked by a client. - Remove(context.Context, *RemoveRequest) (*RemoveResponse, error) - // Move moves node from one parent to another. Invoked by a client. - Move(context.Context, *MoveRequest) (*MoveResponse, error) - // GetNodeByPath returns list of IDs corresponding to a specific filepath. - GetNodeByPath(context.Context, *GetNodeByPathRequest) (*GetNodeByPathResponse, error) - // GetSubTree returns tree corresponding to a specific node. - GetSubTree(*GetSubTreeRequest, TreeService_GetSubTreeServer) error - // TreeList return list of the existing trees in the container. 
- TreeList(context.Context, *TreeListRequest) (*TreeListResponse, error) - // Apply pushes log operation from another node to the current. - // The request must be signed by a container node. - Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) - // GetOpLog returns a stream of logged operations starting from some height. - GetOpLog(*GetOpLogRequest, TreeService_GetOpLogServer) error - // Healthcheck is a dummy rpc to check service availability - Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) -} - -// UnimplementedTreeServiceServer should be embedded to have forward compatible implementations. -type UnimplementedTreeServiceServer struct { -} - -func (UnimplementedTreeServiceServer) Add(context.Context, *AddRequest) (*AddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Add not implemented") -} -func (UnimplementedTreeServiceServer) AddByPath(context.Context, *AddByPathRequest) (*AddByPathResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddByPath not implemented") -} -func (UnimplementedTreeServiceServer) Remove(context.Context, *RemoveRequest) (*RemoveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") -} -func (UnimplementedTreeServiceServer) Move(context.Context, *MoveRequest) (*MoveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Move not implemented") -} -func (UnimplementedTreeServiceServer) GetNodeByPath(context.Context, *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNodeByPath not implemented") -} -func (UnimplementedTreeServiceServer) GetSubTree(*GetSubTreeRequest, TreeService_GetSubTreeServer) error { - return status.Errorf(codes.Unimplemented, "method GetSubTree not implemented") -} -func (UnimplementedTreeServiceServer) TreeList(context.Context, *TreeListRequest) (*TreeListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TreeList not implemented") -} -func (UnimplementedTreeServiceServer) Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") -} -func (UnimplementedTreeServiceServer) GetOpLog(*GetOpLogRequest, TreeService_GetOpLogServer) error { - return status.Errorf(codes.Unimplemented, "method GetOpLog not implemented") -} -func (UnimplementedTreeServiceServer) Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Healthcheck not implemented") -} - -// UnsafeTreeServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to TreeServiceServer will -// result in compilation errors. 
-type UnsafeTreeServiceServer interface { - mustEmbedUnimplementedTreeServiceServer() -} - -func RegisterTreeServiceServer(s grpc.ServiceRegistrar, srv TreeServiceServer) { - s.RegisterService(&TreeService_ServiceDesc, srv) -} - -func _TreeService_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).Add(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_Add_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).Add(ctx, req.(*AddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_AddByPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddByPathRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).AddByPath(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_AddByPath_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).AddByPath(ctx, req.(*AddByPathRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).Remove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_Remove_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).Remove(ctx, req.(*RemoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_Move_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).Move(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_Move_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).Move(ctx, req.(*MoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_GetNodeByPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeByPathRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).GetNodeByPath(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_GetNodeByPath_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).GetNodeByPath(ctx, req.(*GetNodeByPathRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_GetSubTree_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetSubTreeRequest) - if err := stream.RecvMsg(m); err 
!= nil { - return err - } - return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{stream}) -} - -type TreeService_GetSubTreeServer interface { - Send(*GetSubTreeResponse) error - grpc.ServerStream -} - -type treeServiceGetSubTreeServer struct { - grpc.ServerStream -} - -func (x *treeServiceGetSubTreeServer) Send(m *GetSubTreeResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TreeService_TreeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TreeListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).TreeList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_TreeList_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).TreeList(ctx, req.(*TreeListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ApplyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).Apply(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_Apply_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).Apply(ctx, req.(*ApplyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TreeService_GetOpLog_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetOpLogRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{stream}) -} - -type TreeService_GetOpLogServer interface { - Send(*GetOpLogResponse) error - grpc.ServerStream -} - -type treeServiceGetOpLogServer struct { - grpc.ServerStream -} - -func (x *treeServiceGetOpLogServer) Send(m *GetOpLogResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TreeService_Healthcheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthcheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TreeServiceServer).Healthcheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TreeService_Healthcheck_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TreeServiceServer).Healthcheck(ctx, req.(*HealthcheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// TreeService_ServiceDesc is the grpc.ServiceDesc for TreeService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var TreeService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "tree.TreeService", - HandlerType: (*TreeServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Add", - Handler: _TreeService_Add_Handler, - }, - { - MethodName: "AddByPath", - Handler: _TreeService_AddByPath_Handler, - }, - { - MethodName: "Remove", - Handler: _TreeService_Remove_Handler, - }, - { - MethodName: "Move", - Handler: _TreeService_Move_Handler, - }, - { - MethodName: "GetNodeByPath", - Handler: _TreeService_GetNodeByPath_Handler, - }, - { - MethodName: "TreeList", - Handler: _TreeService_TreeList_Handler, - }, - { - MethodName: "Apply", - Handler: _TreeService_Apply_Handler, - }, - { - MethodName: "Healthcheck", - Handler: _TreeService_Healthcheck_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetSubTree", - Handler: _TreeService_GetSubTree_Handler, - ServerStreams: true, - }, - { - StreamName: "GetOpLog", - Handler: _TreeService_GetOpLog_Handler, - ServerStreams: true, - }, - }, - Metadata: "pkg/services/tree/service.proto", -} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go deleted file mode 100644 index 8221a4546..000000000 --- a/pkg/services/tree/signature.go +++ /dev/null @@ -1,196 +0,0 @@ -package tree - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - - core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type message interface { - SignedDataSize() int - ReadSignedData([]byte) ([]byte, error) - GetSignature() *Signature - SetSignature(*Signature) -} - -var ( - errBearerWrongContainer = errors.New("bearer token is created for another container") - errBearerSignature = errors.New("invalid bearer token signature") -) - -// verifyClient verifies if the request for a client operation -// was signed by a key allowed by (e)ACL rules. -// Operation must be one of: -// - 1. ObjectPut; -// - 2. ObjectGet. 
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { - err := verifyMessage(req) - if err != nil { - return err - } - - isAuthorized, err := s.isAuthorized(req, op) - if isAuthorized || err != nil { - return err - } - - cnr, err := s.cnrSource.Get(ctx, cid) - if err != nil { - return fmt.Errorf("can't get container %s: %w", cid, err) - } - - bt, err := parseBearer(rawBearer, cid) - if err != nil { - return fmt.Errorf("access to operation %s is denied: %w", op, err) - } - - role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt) - if err != nil { - return fmt.Errorf("can't get request role: %w", err) - } - - if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { - return apeErr(err) - } - return nil -} - -func apeErr(err error) error { - var chRouterErr *checkercore.ChainRouterError - if !errors.As(err, &chRouterErr) { - errServerInternal := &apistatus.ServerInternal{} - apistatus.WriteInternalServerErr(errServerInternal, err) - return errServerInternal - } - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(err.Error()) - return errAccessDenied -} - -// Returns true iff the operation is read-only and request was signed -// with one of the authorized keys. -func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { - if op != acl.OpObjectGet { - return false, nil - } - - sign := req.GetSignature() - if sign == nil { - return false, errors.New("missing signature") - } - - key := sign.GetKey() - for _, currentKey := range *s.authorizedKeys.Load() { - if bytes.Equal(currentKey, key) { - return true, nil - } - } - return false, nil -} - -func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) { - if len(rawBearer) == 0 { - return nil, nil - } - - bt := new(bearer.Token) - if err := bt.Unmarshal(rawBearer); err != nil { - return nil, fmt.Errorf("invalid bearer token: %w", err) - } - if !bt.AssertContainer(cid) { - return nil, errBearerWrongContainer - } - if !bt.VerifySignature() { - return nil, errBearerSignature - } - return bt, nil -} - -func verifyMessage(m message) error { - binBody, err := m.ReadSignedData(nil) - if err != nil { - return fmt.Errorf("marshal request body: %w", err) - } - - sig := m.GetSignature() - - // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(sig.GetKey()) - sigV2.SetSign(sig.GetSign()) - sigV2.SetScheme(refs.ECDSA_SHA512) - - var sigSDK frostfscrypto.Signature - if err := sigSDK.ReadFromV2(sigV2); err != nil { - return fmt.Errorf("can't read signature: %w", err) - } - - if !sigSDK.Verify(binBody) { - return errors.New("invalid signature") - } - return nil -} - -// SignMessage uses the provided key and signs any protobuf -// message that was generated for the TreeService by the -// protoc-gen-go-frostfs generator. Returns any errors directly. 
-func SignMessage(m message, key *ecdsa.PrivateKey) error { - binBody, err := m.ReadSignedData(nil) - if err != nil { - return err - } - - keySDK := frostfsecdsa.Signer(*key) - data, err := keySDK.Sign(binBody) - if err != nil { - return err - } - - rawPub := make([]byte, keySDK.Public().MaxEncodedSize()) - rawPub = rawPub[:keySDK.Public().Encode(rawPub)] - m.SetSignature(&Signature{ - Key: rawPub, - Sign: data, - }) - - return nil -} - -func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, *keys.PublicKey, error) { - role := acl.RoleOthers - owner := cnr.Value.Owner() - - rawKey := req.GetSignature().GetKey() - if bt != nil && bt.Impersonate() { - rawKey = bt.SigningKeyBytes() - } - - pub, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256()) - if err != nil { - return role, nil, fmt.Errorf("invalid public key: %w", err) - } - - var reqSigner user.ID - user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*pub)) - - if reqSigner.Equals(owner) { - role = acl.RoleOwner - } - - return role, pub, nil -} diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go deleted file mode 100644 index ca1e438cc..000000000 --- a/pkg/services/tree/signature_test.go +++ /dev/null @@ -1,442 +0,0 @@ -package tree - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "encoding/hex" - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" - aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" - "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -const versionTreeID = "version" - -type dummyNetmapSource struct { - netmap.Source -} - -type dummySubjectProvider struct { - subjects map[util.Uint160]client.SubjectExtended -} - -func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { - res := s.subjects[addr] - return &client.Subject{ - PrimaryKey: res.PrimaryKey, - AdditionalKeys: res.AdditionalKeys, - Namespace: res.Namespace, - Name: res.Name, - KV: res.KV, - }, nil -} - -func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { - res := s.subjects[addr] - return &res, nil -} - -type dummyEpochSource struct { - epoch uint64 -} - -func (s dummyEpochSource) CurrentEpoch() uint64 { - return s.epoch -} - -type dummyContainerSource map[string]*containercore.Container - -func (s dummyContainerSource) 
List(context.Context) ([]cid.ID, error) { - res := make([]cid.ID, 0, len(s)) - var cnr cid.ID - - for cidStr := range s { - err := cnr.DecodeString(cidStr) - if err != nil { - return nil, err - } - - res = append(res, cnr) - } - - return res, nil -} - -func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) { - cnt, ok := s[id.String()] - if !ok { - return nil, errors.New("container not found") - } - return cnt, nil -} - -func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) { - return &containercore.DelInfo{}, nil -} - -func testContainer(owner user.ID) container.Container { - var r netmapSDK.ReplicaDescriptor - r.SetNumberOfObjects(1) - - var pp netmapSDK.PlacementPolicy - pp.AddReplicas(r) - - var cnt container.Container - cnt.SetOwner(owner) - cnt.SetPlacementPolicy(pp) - - return cnt -} - -const currentEpoch = 123 - -func TestMessageSign(t *testing.T) { - privs := make([]*keys.PrivateKey, 4) - for i := range privs { - p, err := keys.NewPrivateKey() - require.NoError(t, err) - privs[i] = p - } - - cid1 := cidtest.ID() - cid2 := cidtest.ID() - - var ownerID user.ID - user.IDFromKey(&ownerID, (ecdsa.PublicKey)(*privs[0].PublicKey())) - - cnr := &containercore.Container{ - Value: testContainer(ownerID), - } - - e := inmemory.NewInMemoryLocalOverrides() - e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{ - Type: engine.Container, - Name: cid1.EncodeToString(), - }, testChain(privs[0].PublicKey(), privs[1].PublicKey())) - frostfsidProvider := dummySubjectProvider{ - subjects: make(map[util.Uint160]client.SubjectExtended), - } - - s := &Service{ - cfg: cfg{ - log: test.NewLogger(t), - key: &privs[0].PrivateKey, - nmSource: dummyNetmapSource{}, - cnrSource: dummyContainerSource{ - cid1.String(): cnr, - }, - frostfsidSubjectProvider: frostfsidProvider, - state: dummyEpochSource{epoch: currentEpoch}, - }, - apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), - } - - s.cfg.authorizedKeys.Store(&[][]byte{}) - rawCID1 := make([]byte, sha256.Size) - cid1.Encode(rawCID1) - - req := &MoveRequest{ - Body: &MoveRequest_Body{ - ContainerId: rawCID1, - ParentId: 1, - NodeId: 2, - Meta: []KeyValue{ - {Key: "kkk", Value: []byte("vvv")}, - }, - }, - } - - op := acl.OpObjectPut - cnr.Value.SetBasicACL(acl.PublicRW) - - t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) - }) - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) - - t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) - }) - - cnr.Value.SetBasicACL(acl.Private) - - t.Run("extension disabled", func(t *testing.T) { - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) - }) - - t.Run("invalid key", func(t *testing.T) { - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) - }) - - t.Run("bearer", func(t *testing.T) { - bACL := acl.PrivateExtended - bACL.AllowBearerRules(op) - cnr.Value.SetBasicACL(bACL) - - bACL.DisableExtension() - - t.Run("invalid bearer", func(t *testing.T) { - 
req.Body.BearerToken = []byte{0xFF} - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - - t.Run("invalid bearer CID", func(t *testing.T) { - bt := testBearerToken(cid2, privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - t.Run("invalid bearer owner", func(t *testing.T) { - bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[1].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - t.Run("invalid bearer signature", func(t *testing.T) { - bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - - var bv2 aclV2.BearerToken - bt.WriteToV2(&bv2) - bv2.GetSignature().SetSign([]byte{1, 2, 3}) - req.Body.BearerToken = bv2.StableMarshal(nil) - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - - t.Run("omit override within bt", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - }) - - t.Run("invalid override within bearer token", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) - }) - - t.Run("impersonate", func(t *testing.T) { - cnr.Value.SetBasicACL(acl.PublicRWExtended) - var bt bearer.Token - 
bt.SetExp(10) - bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - t.Run("impersonate but different signer", func(t *testing.T) { - var bt bearer.Token - bt.SetExp(10) - bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[1].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - t.Run("impersonate but different issuer", func(t *testing.T) { - var bt bearer.Token - bt.SetExp(10) - bt.SetImpersonate(true) - - differentUserPrivKey, err := keys.NewPrivateKey() - require.NoError(t, err) - - var reqSigner user.ID - user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*differentUserPrivKey.PublicKey())) - - bt.ForUser(reqSigner) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - cnr.Value.SetBasicACL(acl.PublicRWExtended) - - t.Run("put and get", func(t *testing.T) { - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - t.Run("only get", func(t *testing.T) { - require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - t.Run("none", func(t *testing.T) { - require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - }) -} - -func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - b.SetAPEOverride(bearer.APEOverride{ 
- Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid.EncodeToString(), - }, - Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, - }) - - return b -} - -func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - b.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - }, - Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, - }) - - return b -} - -func testBearerTokenNoOverride() bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - return b -} - -func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { - ruleGet := chain.Rule{ - Status: chain.Allow, - Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, - Actions: chain.Actions{Names: []string{native.MethodGetObject}}, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: native.PropertyKeyActorPublicKey, - Value: hex.EncodeToString(forPutGet.Bytes()), - }, - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: native.PropertyKeyActorPublicKey, - Value: hex.EncodeToString(forGet.Bytes()), - }, - }, - } - rulePut := chain.Rule{ - Status: chain.Allow, - Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, - Actions: chain.Actions{Names: []string{native.MethodPutObject}}, - Any: true, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindRequest, - Key: native.PropertyKeyActorPublicKey, - Value: hex.EncodeToString(forPutGet.Bytes()), - }, - }, - } - - return &chain.Chain{ - Rules: []chain.Rule{ - ruleGet, - rulePut, - }, - } -} diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go deleted file mode 100644 index 1480bff92..000000000 --- a/pkg/services/tree/sync.go +++ /dev/null @@ -1,581 +0,0 @@ -package tree - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "crypto/tls" - "errors" - "fmt" - "io" - "math" - "math/rand" - "sync" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" -) - -// ErrNotInContainer is returned when operation could not be performed -// because the node is not included in the container. 
-var ErrNotInContainer = errors.New("node is not in container") - -const defaultSyncWorkerCount = 20 - -// synchronizeAllTrees synchronizes all the trees of the container. It fetches -// tree IDs from the other container nodes. Returns ErrNotInContainer if the node -// is not included in the container. -func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { - nodes, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return fmt.Errorf("can't get container nodes: %w", err) - } - - if pos < 0 { - return ErrNotInContainer - } - - nodes = randomizeNodeOrder(nodes, pos) - if len(nodes) == 0 { - return nil - } - - rawCID := make([]byte, sha256.Size) - cid.Encode(rawCID) - - req := &TreeListRequest{ - Body: &TreeListRequest_Body{ - ContainerId: rawCID, - }, - } - - err = SignMessage(req, s.key) - if err != nil { - return fmt.Errorf("could not sign request: %w", err) - } - - var resp *TreeListResponse - var treesToSync []string - var outErr error - - err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = c.TreeList(fCtx, req) - if outErr != nil { - return false - } - - treesToSync = resp.GetBody().GetIds() - - return true - }) - if err != nil { - outErr = err - } - - if outErr != nil { - return fmt.Errorf("could not fetch tree ID list: %w", outErr) - } - - for _, tid := range treesToSync { - h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid) - if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree, - zap.Stringer("cid", cid), - zap.String("tree", tid)) - continue - } - newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes) - if h < newHeight { - if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil { - s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, - zap.Stringer("cid", cid), - zap.String("tree", tid)) - } - } - } - - return nil -} - -// SynchronizeTree tries to synchronize log starting from the last stored height. -func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error { - nodes, pos, err := s.getContainerNodes(ctx, cid) - if err != nil { - return fmt.Errorf("can't get container nodes: %w", err) - } - - if pos < 0 { - return ErrNotInContainer - } - - nodes = randomizeNodeOrder(nodes, pos) - if len(nodes) == 0 { - return nil - } - - s.synchronizeTree(ctx, cid, 0, treeID, nodes) - return nil -} - -// mergeOperationStreams performs merge sort for node operation streams to one stream. -func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { - defer close(merged) - - // Merging different node streams shuffles incoming operations like that: - // - // x - operation from the stream A - // o - operation from the stream B - // - // --o---o--x--x--x--o---x--x------> t - // ^ - // If all ops have been successfully applied, we must start from the last - // operation height from the stream B. This height is stored in minStreamedLastHeight. 
- var minStreamedLastHeight uint64 = math.MaxUint64 - - ms := make([]*pilorama.Move, len(streams)) - for i := range streams { - select { - case ms[i] = <-streams[i]: - case <-ctx.Done(): - return minStreamedLastHeight - } - } - - for { - var minTimeMoveTime uint64 = math.MaxUint64 - minTimeMoveIndex := -1 - for i, m := range ms { - if m != nil && minTimeMoveTime > m.Time { - minTimeMoveTime = m.Time - minTimeMoveIndex = i - } - } - - if minTimeMoveIndex == -1 { - break - } - - select { - case merged <- ms[minTimeMoveIndex]: - case <-ctx.Done(): - return minStreamedLastHeight - } - height := ms[minTimeMoveIndex].Time - if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { - minStreamedLastHeight = min(minStreamedLastHeight, height) - } - } - - return minStreamedLastHeight -} - -func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, - operationStream <-chan *pilorama.Move, -) (uint64, error) { - var prev *pilorama.Move - var batch []*pilorama.Move - for m := range operationStream { - // skip already applied op - if prev != nil && prev.Time == m.Time { - continue - } - prev = m - batch = append(batch, m) - - if len(batch) == s.syncBatchSize { - if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time, err - } - batch = batch[:0] - } - } - if len(batch) > 0 { - if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time, err - } - } - return math.MaxUint64, nil -} - -func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, - height uint64, cc *grpc.ClientConn, opsCh chan<- *pilorama.Move, -) error { - treeClient := NewTreeServiceClient(cc) - - rawCID := make([]byte, sha256.Size) - cid.Encode(rawCID) - - req := &GetOpLogRequest{ - Body: &GetOpLogRequest_Body{ - ContainerId: rawCID, - TreeId: treeID, - Height: height, - }, - } - if err := SignMessage(req, s.key); err != nil { - return err - } - - c, err := treeClient.GetOpLog(ctx, req) - if err != nil { - return fmt.Errorf("can't initialize client: %w", err) - } - res, err := c.Recv() - for ; err == nil; res, err = c.Recv() { - lm := res.GetBody().GetOperation() - m := &pilorama.Move{ - Parent: lm.GetParentId(), - Child: lm.GetChildId(), - } - if err := m.FromBytes(lm.GetMeta()); err != nil { - return err - } - select { - case opsCh <- m: - case <-ctx.Done(): - return ctx.Err() - } - } - if !errors.Is(err, io.EOF) { - return err - } - return nil -} - -// synchronizeTree synchronizes operations getting them from different nodes. -// Each available node does stream operations to a separate stream. These streams -// are merged into one big stream ordered by operation time. This way allows to skip -// already applied operation and keep good batching. -// The method returns a height that service should start sync from in the next time. 
-func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, - treeID string, nodes []netmapSDK.NodeInfo, -) uint64 { - s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) - - errGroup, egCtx := errgroup.WithContext(ctx) - const workersCount = 1024 - errGroup.SetLimit(workersCount) - - nodeOperationStreams := make([]chan *pilorama.Move, len(nodes)) - for i := range nodeOperationStreams { - nodeOperationStreams[i] = make(chan *pilorama.Move) - } - merged := make(chan *pilorama.Move) - var minStreamedLastHeight uint64 - errGroup.Go(func() error { - minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged) - return nil - }) - var minUnappliedHeight uint64 - errGroup.Go(func() error { - var err error - minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged) - return err - }) - - var allNodesSynced atomic.Bool - allNodesSynced.Store(true) - - for i, n := range nodes { - errGroup.Go(func() error { - var nodeSynced bool - for addr := range n.NetworkEndpoints() { - var a network.Address - if err := a.FromString(addr); err != nil { - s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue - } - - cc, err := dialTreeService(ctx, a, s.key, s.ds) - if err != nil { - s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue - } - - err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) - if err != nil { - s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) - } - nodeSynced = err == nil - _ = cc.Close() - break - } - close(nodeOperationStreams[i]) - if !nodeSynced { - allNodesSynced.Store(false) - } - return nil - }) - } - if err := errGroup.Wait(); err != nil { - allNodesSynced.Store(false) - s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) - } - - newHeight := minStreamedLastHeight - if newHeight > minUnappliedHeight { - newHeight = minUnappliedHeight - } else { - newHeight++ - } - if allNodesSynced.Load() { - return newHeight - } - return from -} - -func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { - cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, key); err != nil { - return nil, err - } - - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} - -func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - host, isTLS, err := client.ParseURI(a.URIAddr()) - if err != nil { - return nil, err - } - - creds := insecure.NewCredentials() - if isTLS { - creds = credentials.NewTLS(&tls.Config{}) - } - - defaultOpts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), - metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), - ), - grpc.WithChainStreamInterceptor( - 
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), - metrics.NewStreamClientInterceptor(), - tracing_grpc.NewStreamClientInterceptor(), - tagging.NewStreamClientInterceptor(), - ), - grpc.WithTransportCredentials(creds), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - grpc.WithDisableServiceConfig(), - } - - return grpc.NewClient(host, append(defaultOpts, opts...)...) -} - -// ErrAlreadySyncing is returned when a service synchronization has already -// been started. -var ErrAlreadySyncing = errors.New("service is being synchronized") - -// ErrShuttingDown is returned when the service is shitting down and could not -// accept any calls. -var ErrShuttingDown = errors.New("service is shutting down") - -// SynchronizeAll forces tree service to synchronize all the trees according to -// netmap information. Must not be called before Service.Start. -// Returns ErrAlreadySyncing if synchronization has been started and blocked -// by another routine. -// Note: non-blocking operation. -func (s *Service) SynchronizeAll() error { - select { - case <-s.closeCh: - return ErrShuttingDown - default: - } - if s.syncDisabled { - return nil - } - - select { - case s.syncChan <- struct{}{}: - return nil - default: - return ErrAlreadySyncing - } -} - -func (s *Service) syncLoop(ctx context.Context) { - for { - select { - case <-s.closeCh: - return - case <-ctx.Done(): - return - case <-s.syncChan: - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync") - s.log.Info(ctx, logs.TreeSyncingTrees) - - start := time.Now() - - cnrs, err := s.cnrSource.List(ctx) - if err != nil { - s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) - s.metrics.AddSyncDuration(time.Since(start), false) - span.End() - break - } - - newMap, cnrsToSync := s.containersToSync(ctx, cnrs) - - s.syncContainers(ctx, cnrsToSync) - - s.removeContainers(ctx, newMap) - - s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized) - - s.metrics.AddSyncDuration(time.Since(start), true) - span.End() - } - s.initialSyncDone.Store(true) - } -} - -func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.syncContainers") - defer span.End() - - // sync new containers - var wg sync.WaitGroup - for _, cnr := range cnrs { - wg.Add(1) - - err := s.syncPool.Submit(func() { - defer wg.Done() - s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) - - err := s.synchronizeAllTrees(ctx, cnr) - if err != nil { - s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) - return - } - - s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) - }) - if err != nil { - wg.Done() - s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization, - zap.Stringer("cid", cnr), - zap.Error(err)) - if errors.Is(err, ants.ErrPoolClosed) { - return - } - } - } - wg.Wait() -} - -func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers") - defer span.End() - - s.cnrMapMtx.Lock() - defer s.cnrMapMtx.Unlock() - - var removed []cid.ID - for cnr := range s.cnrMap { - if _, ok := newContainers[cnr]; ok { - continue - } - - existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr) - if err != nil { - s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, - zap.Stringer("cid", cnr), - zap.Error(err)) - } else if existed { - removed = append(removed, cnr) - } - } - for i := range 
removed { - delete(s.cnrMap, removed[i]) - } - - for _, cnr := range removed { - s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) - - err := s.DropTree(ctx, cnr, "") - if err != nil { - s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree, - zap.Stringer("cid", cnr), - zap.Error(err)) - } - } -} - -func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) { - newMap := make(map[cid.ID]struct{}, len(s.cnrMap)) - cnrsToSync := make([]cid.ID, 0, len(cnrs)) - - for _, cnr := range cnrs { - _, pos, err := s.getContainerNodes(ctx, cnr) - if err != nil { - s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, - zap.Stringer("cid", cnr), - zap.Error(err)) - continue - } - - if pos < 0 { - // node is not included in the container. - continue - } - - newMap[cnr] = struct{}{} - cnrsToSync = append(cnrsToSync, cnr) - } - return newMap, cnrsToSync -} - -// randomizeNodeOrder shuffles nodes and removes not a `pos` index. -// It is assumed that 0 <= pos < len(nodes). -func randomizeNodeOrder(cnrNodes []netmap.NodeInfo, pos int) []netmap.NodeInfo { - if len(cnrNodes) == 1 { - return nil - } - - nodes := make([]netmap.NodeInfo, len(cnrNodes)-1) - n := copy(nodes, cnrNodes[:pos]) - copy(nodes[n:], cnrNodes[pos+1:]) - - rand.Shuffle(len(nodes), func(i, j int) { - nodes[i], nodes[j] = nodes[j], nodes[i] - }) - return nodes -} diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go deleted file mode 100644 index 87d419408..000000000 --- a/pkg/services/tree/sync_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package tree - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "github.com/stretchr/testify/require" -) - -func Test_mergeOperationStreams(t *testing.T) { - tests := []struct { - name string - opTimes [][]uint64 - wantValues []uint64 - wantMinHeight uint64 - }{ - { - name: "1", - opTimes: [][]uint64{ - {250, 251, 255}, - {252, 253, 254, 256, 257}, - }, - wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257}, - wantMinHeight: 255, - }, - { - name: "2", - opTimes: [][]uint64{ - {250, 251, 255, 259}, - {252, 253, 254, 256, 257}, - }, - wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257, 259}, - wantMinHeight: 257, - }, - { - name: "3", - opTimes: [][]uint64{ - {250, 251, 255}, - {249, 250, 251, 253, 254, 256, 257}, - }, - wantValues: []uint64{249, 250, 250, 251, 251, 253, 254, 255, 256, 257}, - wantMinHeight: 255, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - nodeOpChans := make([]chan *pilorama.Move, len(tt.opTimes)) - for i := range nodeOpChans { - nodeOpChans[i] = make(chan *pilorama.Move) - } - - // generate and put values to all chans - for i, ch := range nodeOpChans { - go func() { - for _, tm := range tt.opTimes[i] { - op := &pilorama.Move{} - op.Time = tm - ch <- op - } - close(nodeOpChans[i]) - }() - } - - merged := make(chan *pilorama.Move, 1) - min := make(chan uint64) - go func() { - min <- mergeOperationStreams(context.Background(), nodeOpChans, merged) - }() - - var res []uint64 - for op := range merged { - res = append(res, op.Time) - } - require.Equal(t, tt.wantValues, res) - require.Equal(t, tt.wantMinHeight, <-min) - }) - } -} diff --git a/pkg/services/tree/types.proto b/pkg/services/tree/types.proto deleted file mode 100644 index f122c7cf4..000000000 --- a/pkg/services/tree/types.proto +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Auxiliary structures to use with tree service. 
- */ -syntax = "proto3"; - -package tree; - -option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"; - -// KeyValue represents key-value pair attached to an object. -message KeyValue { - // Attribute name. - string key = 1 [ json_name = "key" ]; - // Attribute value. - bytes value = 2 [ json_name = "value" ]; -} - -// LogMove represents log-entry for a single move operation. -message LogMove { - // ID of the parent node. - uint64 parent_id = 1 [ json_name = "parentID" ]; - // Node meta information, including operation timestamp. - bytes meta = 2 [ json_name = "meta" ]; - // ID of the node to move. - uint64 child_id = 3 [ json_name = "childID" ]; -} - -// Signature of a message. -message Signature { - // Serialized public key as defined in FrostFS API. - bytes key = 1 [ json_name = "key" ]; - // Signature of a message body. - bytes sign = 2 [ json_name = "signature" ]; -} diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go deleted file mode 100644 index 2827b10a9..000000000 --- a/pkg/services/tree/types_frostfs.pb.go +++ /dev/null @@ -1,624 +0,0 @@ -// Code generated by protoc-gen-go-frostfs. DO NOT EDIT. - -package tree - -import ( - json "encoding/json" - fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" - easyproto "github.com/VictoriaMetrics/easyproto" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" - strconv "strconv" -) - -type KeyValue struct { - Key string `json:"key"` - Value []byte `json:"value"` -} - -var ( - _ encoding.ProtoMarshaler = (*KeyValue)(nil) - _ encoding.ProtoUnmarshaler = (*KeyValue)(nil) - _ json.Marshaler = (*KeyValue)(nil) - _ json.Unmarshaler = (*KeyValue)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *KeyValue) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.Key) - size += proto.BytesSize(2, x.Value) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *KeyValue) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *KeyValue) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendString(1, x.Key) - } - if len(x.Value) != 0 { - mm.AppendBytes(2, x.Value) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
-func (x *KeyValue) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "KeyValue") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Value - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Value") - } - x.Value = data - } - } - return nil -} -func (x *KeyValue) GetKey() string { - if x != nil { - return x.Key - } - return "" -} -func (x *KeyValue) SetKey(v string) { - x.Key = v -} -func (x *KeyValue) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} -func (x *KeyValue) SetValue(v []byte) { - x.Value = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *KeyValue) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"key\":" - out.RawString(prefix) - out.String(x.Key) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"value\":" - out.RawString(prefix) - if x.Value != nil { - out.Base64Bytes(x.Value) - } else { - out.String("") - } - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *KeyValue) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "key": - { - var f string - f = in.String() - x.Key = f - } - case "value": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Value = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type LogMove struct { - ParentId uint64 `json:"parentID"` - Meta []byte `json:"meta"` - ChildId uint64 `json:"childID"` -} - -var ( - _ encoding.ProtoMarshaler = (*LogMove)(nil) - _ encoding.ProtoUnmarshaler = (*LogMove)(nil) - _ json.Marshaler = (*LogMove)(nil) - _ json.Unmarshaler = (*LogMove)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *LogMove) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.UInt64Size(1, x.ParentId) - size += proto.BytesSize(2, x.Meta) - size += proto.UInt64Size(3, x.ChildId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *LogMove) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *LogMove) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.ParentId != 0 { - mm.AppendUint64(1, x.ParentId) - } - if len(x.Meta) != 0 { - mm.AppendBytes(2, x.Meta) - } - if x.ChildId != 0 { - mm.AppendUint64(3, x.ChildId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *LogMove) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "LogMove") - } - switch fc.FieldNum { - case 1: // ParentId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ParentId") - } - x.ParentId = data - case 2: // Meta - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Meta") - } - x.Meta = data - case 3: // ChildId - data, ok := fc.Uint64() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ChildId") - } - x.ChildId = data - } - } - return nil -} -func (x *LogMove) GetParentId() uint64 { - if x != nil { - return x.ParentId - } - return 0 -} -func (x *LogMove) SetParentId(v uint64) { - x.ParentId = v -} -func (x *LogMove) GetMeta() []byte { - if x != nil { - return x.Meta - } - return nil -} -func (x *LogMove) SetMeta(v []byte) { - x.Meta = v -} -func (x *LogMove) GetChildId() uint64 { - if x != nil { - return x.ChildId - } - return 0 -} -func (x *LogMove) SetChildId(v uint64) { - x.ChildId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *LogMove) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"parentID\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) - out.RawByte('"') - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"meta\":" - out.RawString(prefix) - if x.Meta != nil { - out.Base64Bytes(x.Meta) - } else { - out.String("") - } - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"childID\":" - out.RawString(prefix) - out.RawByte('"') - out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10) - out.RawByte('"') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *LogMove) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "parentID": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ParentId = f - } - case "meta": - { - var f []byte - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - x.Meta = f - } - case "childID": - { - var f uint64 - r := in.JsonNumber() - n := r.String() - v, err := strconv.ParseUint(n, 10, 64) - if err != nil { - in.AddError(err) - return - } - pv := uint64(v) - f = pv - x.ChildId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type Signature struct { - Key []byte `json:"key"` - Sign []byte `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*Signature)(nil) - _ encoding.ProtoUnmarshaler = (*Signature)(nil) - _ json.Marshaler = (*Signature)(nil) - _ json.Unmarshaler = (*Signature)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *Signature) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.BytesSize(1, x.Key) - size += proto.BytesSize(2, x.Sign) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *Signature) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.Key) != 0 { - mm.AppendBytes(1, x.Key) - } - if len(x.Sign) != 0 { - mm.AppendBytes(2, x.Sign) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *Signature) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "Signature") - } - switch fc.FieldNum { - case 1: // Key - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Key") - } - x.Key = data - case 2: // Sign - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Sign") - } - x.Sign = data - } - } - return nil -} -func (x *Signature) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} -func (x *Signature) SetKey(v []byte) { - x.Key = v -} -func (x *Signature) GetSign() []byte { - if x != nil { - return x.Sign - } - return nil -} -func (x *Signature) SetSign(v []byte) { - x.Sign = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *Signature) MarshalJSON() ([]byte, error) {
-	w := jwriter.Writer{}
-	x.MarshalEasyJSON(&w)
-	return w.Buffer.BuildBytes(), w.Error
-}
-func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
-	if x == nil {
-		out.RawString("null")
-		return
-	}
-	first := true
-	out.RawByte('{')
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"key\":"
-		out.RawString(prefix)
-		if x.Key != nil {
-			out.Base64Bytes(x.Key)
-		} else {
-			out.String("")
-		}
-	}
-	{
-		if !first {
-			out.RawByte(',')
-		} else {
-			first = false
-		}
-		const prefix string = "\"signature\":"
-		out.RawString(prefix)
-		if x.Sign != nil {
-			out.Base64Bytes(x.Sign)
-		} else {
-			out.String("")
-		}
-	}
-	out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *Signature) UnmarshalJSON(data []byte) error {
-	r := jlexer.Lexer{Data: data}
-	x.UnmarshalEasyJSON(&r)
-	return r.Error()
-}
-func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	isTopLevel := in.IsStart()
-	if in.IsNull() {
-		if isTopLevel {
-			in.Consumed()
-		}
-		in.Skip()
-		return
-	}
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		key := in.UnsafeFieldName(false)
-		in.WantColon()
-		if in.IsNull() {
-			in.Skip()
-			in.WantComma()
-			continue
-		}
-		switch key {
-		case "key":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.Key = f
-			}
-		case "signature":
-			{
-				var f []byte
-				{
-					tmp := in.Bytes()
-					if len(tmp) == 0 {
-						tmp = nil
-					}
-					f = tmp
-				}
-				x.Sign = f
-			}
-		}
-		in.WantComma()
-	}
-	in.Delim('}')
-	if isTopLevel {
-		in.Consumed()
-	}
-}
diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go
deleted file mode 100644
index 5152a8ece..000000000
--- a/pkg/services/util/response/service.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package response
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-)
-
-// Service represents universal v2 service
-// that sets response meta header values.
-type Service struct {
-	version refs.Version
-
-	state netmap.State
-}
-
-// NewService creates, initializes and returns a Service instance.
-func NewService(nmState netmap.State) *Service {
-	s := &Service{state: nmState}
-	version.Current().WriteToV2(&s.version)
-	return s
-}
-
-// SetMeta adds the meta-header to resp.
-func (s *Service) SetMeta(resp util.ResponseMessage) {
-	meta := new(session.ResponseMetaHeader)
-	meta.SetVersion(&s.version)
-	meta.SetTTL(1) // FIXME: #1160 TTL must be calculated
-	meta.SetEpoch(s.state.CurrentEpoch())
-
-	if origin := resp.GetMetaHeader(); origin != nil {
-		// FIXME: #1160 what if origin is set by local server?
-		meta.SetOrigin(origin)
-	}
-
-	resp.SetMetaHeader(meta)
-}
diff --git a/pkg/services/util/server.go b/pkg/services/util/server.go
deleted file mode 100644
index 83ab323f7..000000000
--- a/pkg/services/util/server.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package util
-
-import (
-	"context"
-)
-
-// ServerStream is an interface of server-side stream v2.
-type ServerStream interface {
-	Context() context.Context
-}
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
deleted file mode 100644
index 348a45a94..000000000
--- a/pkg/services/util/sign.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package util
-
-import (
-	"crypto/ecdsa"
-	"errors"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-type RequestMessage interface {
-	GetMetaHeader() *session.RequestMetaHeader
-}
-
-// ResponseMessage is an interface of FrostFS response message.
-type ResponseMessage interface {
-	GetMetaHeader() *session.ResponseMetaHeader
-	SetMetaHeader(*session.ResponseMetaHeader)
-}
-
-type SignService struct {
-	key *ecdsa.PrivateKey
-}
-
-var ErrAbortStream = errors.New("abort message stream")
-
-func NewUnarySignService(key *ecdsa.PrivateKey) *SignService {
-	return &SignService{
-		key: key,
-	}
-}
-
-// SignResponse signs the response with the private key via signature.SignServiceMessage.
-// The signature error affects the result depending on the protocol version:
-//   - if status return is supported, panics since we cannot return the failed status, because it will not be signed.
-//   - otherwise, returns error in order to transport it directly.
-func (s *SignService) SignResponse(resp ResponseMessage, err error) error {
-	if err != nil {
-		setStatusV2(resp, err)
-	}
-
-	err = signature.SignServiceMessage(s.key, resp)
-	if err != nil {
-		return fmt.Errorf("could not sign response: %w", err)
-	}
-
-	return nil
-}
-
-func (s *SignService) VerifyRequest(req RequestMessage) error {
-	if err := signature.VerifyServiceMessage(req); err != nil {
-		sigErr := new(apistatus.SignatureVerification)
-		sigErr.SetMessage(err.Error())
-		return sigErr
-	}
-	return nil
-}
-
-// EnsureNonNilResponse creates an appropriate response struct if it is nil.
-func EnsureNonNilResponse[T any](resp *T, err error) (*T, error) {
-	if resp != nil {
-		return resp, err
-	}
-	return new(T), err
-}
-
-func setStatusV2(resp ResponseMessage, err error) {
-	// unwrap error
-	for e := errors.Unwrap(err); e != nil; e = errors.Unwrap(err) {
-		err = e
-	}
-
-	session.SetStatus(resp, apistatus.ToStatusV2(apistatus.ErrToStatus(err)))
-}
diff --git a/pkg/tracing/trace.go b/pkg/tracing/trace.go
deleted file mode 100644
index dc2f90259..000000000
--- a/pkg/tracing/trace.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package tracing
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/trace"
-)
-
-var emptyTraceID = [16]byte{}
-
-// GetTraceID retrieves the trace ID from the provided context.
-// It returns an empty string if no trace ID is found.
-func GetTraceID(ctx context.Context) string { - span := trace.SpanFromContext(ctx) - if span == nil || span.SpanContext().TraceID() == emptyTraceID { - return "" - } - return span.SpanContext().TraceID().String() -} diff --git a/pkg/util/ape/converter.go b/pkg/util/ape/converter.go deleted file mode 100644 index c706cf052..000000000 --- a/pkg/util/ape/converter.go +++ /dev/null @@ -1,280 +0,0 @@ -package ape - -import ( - "encoding/hex" - "fmt" - - v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" -) - -type ConvertEACLError struct { - nested error -} - -func (e *ConvertEACLError) Error() string { - if e == nil { - return "" - } - return "failed to convert eACL table to policy engine chain: " + e.nested.Error() -} - -func (e *ConvertEACLError) Unwrap() error { - if e == nil { - return nil - } - return e.nested -} - -// ConvertEACLToAPE converts eacl.Table to apechain.Chain. -func ConvertEACLToAPE(eaclTable *eacl.Table) (*apechain.Chain, error) { - if eaclTable == nil { - return nil, nil - } - res := &apechain.Chain{ - MatchType: apechain.MatchTypeFirstMatch, - } - - resource := getResource(eaclTable) - - for _, eaclRecord := range eaclTable.Records() { - if len(eaclRecord.Targets()) == 0 { - // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101 - // and https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L36 - // such record doesn't have any effect - continue - } - - st, err := actionToStatus(eaclRecord.Action()) - if err != nil { - return nil, err - } - act, err := operationToAction(eaclRecord.Operation()) - if err != nil { - return nil, err - } - - if len(eaclRecord.Filters()) == 0 { - res.Rules = appendTargetsOnly(res.Rules, st, act, resource, eaclRecord.Targets()) - } else { - res.Rules, err = appendTargetsAndFilters(res.Rules, st, act, resource, eaclRecord.Targets(), eaclRecord.Filters()) - if err != nil { - return nil, err - } - } - } - - return res, nil -} - -func apeRoleConds(role eacl.Role) (res []apechain.Condition) { - switch role { - case eacl.RoleSystem: - res = append(res, - apechain.Condition{ - Op: apechain.CondStringEquals, - Kind: apechain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleContainer, - }, - ) - res = append(res, - apechain.Condition{ - Op: apechain.CondStringEquals, - Kind: apechain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleIR, - }, - ) - case eacl.RoleOthers: - res = append(res, - apechain.Condition{ - Op: apechain.CondStringEquals, - Kind: apechain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOthers, - }, - ) - case eacl.RoleUser: - res = append(res, - apechain.Condition{ - Op: apechain.CondStringEquals, - Kind: apechain.KindRequest, - Key: nativeschema.PropertyKeyActorRole, - Value: nativeschema.PropertyValueContainerRoleOwner, - }, - ) - case eacl.RoleUnknown: - // such condition has no effect - default: - } - return -} - -func appendTargetsOnly(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources, targets []eacl.Target) []apechain.Rule { - // see 
https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101 - // role OR public key must be equal - rule := apechain.Rule{ - Status: st, - Actions: act, - Resources: res, - Any: true, - } - for _, target := range targets { - rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...) - for _, binKey := range target.BinaryKeys() { - var pubKeyCondition apechain.Condition - pubKeyCondition.Kind = apechain.KindRequest - pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey - pubKeyCondition.Value = hex.EncodeToString(binKey) - pubKeyCondition.Op = apechain.CondStringEquals - rule.Condition = append(rule.Condition, pubKeyCondition) - } - } - return append(source, rule) -} - -func appendTargetsAndFilters(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources, - targets []eacl.Target, filters []eacl.Filter, -) ([]apechain.Rule, error) { - // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101 - // role OR public key must be equal - // so filters are repeated for each role and public key - var err error - for _, target := range targets { - rule := apechain.Rule{ - Status: st, - Actions: act, - Resources: res, - } - rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...) - rule.Condition, err = appendFilters(rule.Condition, filters) - if err != nil { - return nil, err - } - - source = append(source, rule) - - for _, binKey := range target.BinaryKeys() { - rule := apechain.Rule{ - Status: st, - Actions: act, - Resources: res, - } - var pubKeyCondition apechain.Condition - pubKeyCondition.Kind = apechain.KindRequest - pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey - pubKeyCondition.Value = hex.EncodeToString(binKey) - pubKeyCondition.Op = apechain.CondStringEquals - - rule.Condition = append(rule.Condition, pubKeyCondition) - rule.Condition, err = appendFilters(rule.Condition, filters) - if err != nil { - return nil, err - } - - source = append(source, rule) - } - } - - return source, nil -} - -func appendFilters(source []apechain.Condition, filters []eacl.Filter) ([]apechain.Condition, error) { - for _, filter := range filters { - var cond apechain.Condition - var isObject bool - if filter.From() == eacl.HeaderFromObject { - cond.Kind = apechain.KindResource - isObject = true - } else if filter.From() == eacl.HeaderFromRequest { - cond.Kind = apechain.KindRequest - } else { - return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter from: %d", filter.From())} - } - - if filter.Matcher() == eacl.MatchStringEqual { - cond.Op = apechain.CondStringEquals - } else if filter.Matcher() == eacl.MatchStringNotEqual { - cond.Op = apechain.CondStringNotEquals - } else { - return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter matcher: %d", filter.Matcher())} - } - - cond.Key = eaclKeyToAPEKey(filter.Key(), isObject) - cond.Value = filter.Value() - - source = append(source, cond) - } - return source, nil -} - -func eaclKeyToAPEKey(key string, isObject bool) string { - if !isObject { - return key - } - switch key { - default: - return key - case v2acl.FilterObjectVersion: - return nativeschema.PropertyKeyObjectVersion - case v2acl.FilterObjectID: - return nativeschema.PropertyKeyObjectID - case v2acl.FilterObjectContainerID: - return nativeschema.PropertyKeyObjectContainerID - case v2acl.FilterObjectOwnerID: - return nativeschema.PropertyKeyObjectOwnerID - case 
v2acl.FilterObjectCreationEpoch: - return nativeschema.PropertyKeyObjectCreationEpoch - case v2acl.FilterObjectPayloadLength: - return nativeschema.PropertyKeyObjectPayloadLength - case v2acl.FilterObjectPayloadHash: - return nativeschema.PropertyKeyObjectPayloadHash - case v2acl.FilterObjectType: - return nativeschema.PropertyKeyObjectType - case v2acl.FilterObjectHomomorphicHash: - return nativeschema.PropertyKeyObjectHomomorphicHash - } -} - -func getResource(eaclTable *eacl.Table) apechain.Resources { - cnrID, isSet := eaclTable.CID() - if isSet { - return apechain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - } - return apechain.Resources{ - Names: []string{nativeschema.ResourceFormatRootObjects}, - } -} - -func actionToStatus(a eacl.Action) (apechain.Status, error) { - switch a { - case eacl.ActionAllow: - return apechain.Allow, nil - case eacl.ActionDeny: - return apechain.AccessDenied, nil - default: - return apechain.NoRuleFound, &ConvertEACLError{nested: fmt.Errorf("unknown action: %d", a)} - } -} - -var eaclOperationToEngineAction = map[eacl.Operation]apechain.Actions{ - eacl.OperationGet: {Names: []string{nativeschema.MethodGetObject}}, - eacl.OperationHead: {Names: []string{nativeschema.MethodHeadObject}}, - eacl.OperationPut: {Names: []string{nativeschema.MethodPutObject}}, - eacl.OperationDelete: {Names: []string{nativeschema.MethodDeleteObject}}, - eacl.OperationSearch: {Names: []string{nativeschema.MethodSearchObject}}, - eacl.OperationRange: {Names: []string{nativeschema.MethodRangeObject}}, - eacl.OperationRangeHash: {Names: []string{nativeschema.MethodHashObject}}, -} - -func operationToAction(op eacl.Operation) (apechain.Actions, error) { - if v, ok := eaclOperationToEngineAction[op]; ok { - return v, nil - } - return apechain.Actions{}, &ConvertEACLError{nested: fmt.Errorf("unknown operation: %d", op)} -} diff --git a/pkg/util/ape/converter_test.go b/pkg/util/ape/converter_test.go deleted file mode 100644 index 28125606c..000000000 --- a/pkg/util/ape/converter_test.go +++ /dev/null @@ -1,471 +0,0 @@ -package ape - -import ( - "encoding/hex" - "fmt" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestEACLTableWithoutRecords(t *testing.T) { - t.Parallel() - - tb := eacl.NewTable() - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - req := &testRequest{ - res: &testResource{name: nativeschema.ResourceFormatRootObjects}, - } - - compare(t, vu, ch, req) - - cnrID := cidtest.ID() - tb.SetCID(cnrID) - vu.WithContainerID(&cnrID) - req.res.name = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()) - - ch, err = ConvertEACLToAPE(tb) - require.NoError(t, err) - - compare(t, vu, ch, req) -} - -func TestNoTargets(t *testing.T) { - t.Parallel() - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - - // deny delete 
without role or key specified - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - record.AddObjectContainerIDFilter(eacl.MatchStringEqual, cnrID) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(), - }, - res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - compare(t, vu, ch, req) - } -} - -func TestNoFilters(t *testing.T) { - t.Parallel() - - t.Run("target match by role only", func(t *testing.T) { - t.Parallel() - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - record.SetTargets(*target) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers, - }, - res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - compare(t, vu, ch, req) - } - }) - - t.Run("target match by role and public key", func(t *testing.T) { - t.Parallel() - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - p1, err := keys.NewPrivateKey() - require.NoError(t, err) - p2, err := keys.NewPrivateKey() - require.NoError(t, err) - - vu.WithSenderKey(p2.PublicKey().Bytes()) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()}) - record.SetTargets(*target) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers, - nativeschema.PropertyKeyActorPublicKey: string(p2.PublicKey().Bytes()), - }, - res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - compare(t, vu, ch, req) - } - }) - - t.Run("target match by public key only", func(t *testing.T) { - t.Parallel() - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - p1, err := keys.NewPrivateKey() - require.NoError(t, err) - p2, err := keys.NewPrivateKey() - require.NoError(t, err) - - vu.WithSenderKey(p2.PublicKey().Bytes()) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()}) 
- record.SetTargets(*target) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(p2.PublicKey().Bytes()), - }, - res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - compare(t, vu, ch, req) - } - }) - - t.Run("target doesn't match", func(t *testing.T) { - t.Parallel() - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleSystem) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - record.SetTargets(*target) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: eacl.RoleSystem.String(), - }, - res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())}, - } - compare(t, vu, ch, req) - } - }) -} - -func TestWithFilters(t *testing.T) { - t.Parallel() - - t.Run("object attributes", func(t *testing.T) { - t.Parallel() - - const attrKey = "attribute_1" - const attrValue = "attribute_1_value" - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - vu.WithHeaderSource(&testHeaderSource{ - headers: map[eacl.FilterHeaderType][]eacl.Header{ - eacl.HeaderFromObject: {&testHeader{key: attrKey, value: attrValue}}, - }, - }) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - record.SetTargets(*target) - - record.AddObjectAttributeFilter(eacl.MatchStringEqual, attrKey, attrValue) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers, - }, - res: &testResource{ - name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()), - props: map[string]string{ - attrKey: attrValue, - }, - }, - } - compare(t, vu, ch, req) - } - }) - - t.Run("request attributes", func(t *testing.T) { - t.Parallel() - - const attrKey = "attribute_1" - const attrValue = "attribute_1_value" - - for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} { - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - vu.WithHeaderSource(&testHeaderSource{ - headers: map[eacl.FilterHeaderType][]eacl.Header{ - eacl.HeaderFromRequest: {&testHeader{key: attrKey, value: attrValue}}, - }, - }) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(act) - record.SetOperation(eacl.OperationDelete) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - record.SetTargets(*target) - - record.AddFilter(eacl.HeaderFromRequest, 
eacl.MatchStringEqual, attrKey, attrValue) - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers, - attrKey: attrValue, - }, - res: &testResource{ - name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()), - }, - } - compare(t, vu, ch, req) - } - }) -} - -func TestNoHeader(t *testing.T) { - t.Skip("Should pass after https://git.frostfs.info/TrueCloudLab/policy-engine/issues/8#issuecomment-26126") - - t.Parallel() - - const attrKey = "attribute_1" - cnrID := cidtest.ID() - tb := eacl.NewTable() - tb.SetCID(cnrID) - - vu := &eacl.ValidationUnit{} - vu.WithEACLTable(tb) - vu.WithContainerID(&cnrID) - vu.WithRole(eacl.RoleOthers) - vu.WithHeaderSource(&testHeaderSource{ - headers: map[eacl.FilterHeaderType][]eacl.Header{ - eacl.HeaderFromRequest: {}, - }, - }) - - // allow/deny for OTHERS - record := eacl.NewRecord() - record.SetAction(eacl.ActionDeny) - record.SetOperation(eacl.OperationDelete) - - target := eacl.NewTarget() - target.SetRole(eacl.RoleOthers) - record.SetTargets(*target) - - record.AddFilter(eacl.HeaderFromRequest, eacl.MatchStringEqual, attrKey, "") - - tb.AddRecord(record) - - ch, err := ConvertEACLToAPE(tb) - require.NoError(t, err) - - req := &testRequest{ - props: map[string]string{ - nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(), - }, - res: &testResource{ - name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()), - }, - } - compare(t, vu, ch, req) -} - -func compare(t *testing.T, vu *eacl.ValidationUnit, ch *apechain.Chain, req *testRequest) { - validator := eacl.NewValidator() - for eaclOp, apeOp := range eaclOperationToEngineAction { - vu.WithOperation(eaclOp) - req.op = apeOp.Names[0] - - eaclAct, recordFound := validator.CalculateAction(vu) - apeSt, ruleFound := ch.Match(req) - - require.Equal(t, recordFound, ruleFound) - require.NotEqual(t, eacl.ActionUnknown, eaclAct) - if eaclAct == eacl.ActionAllow { - if recordFound { - require.Equal(t, apechain.Allow, apeSt) - } else { - require.Equal(t, apechain.NoRuleFound, apeSt) - } - } else { - require.Equal(t, apechain.AccessDenied, apeSt) - } - } -} - -type testRequest struct { - op string - props map[string]string - res *testResource -} - -func (r *testRequest) Operation() string { - return r.op -} - -func (r *testRequest) Property(key string) string { - if v, ok := r.props[key]; ok { - return v - } - return "" -} - -func (r *testRequest) Resource() resource.Resource { - return r.res -} - -type testResource struct { - name string - props map[string]string -} - -func (r *testResource) Name() string { - return r.name -} - -func (r *testResource) Property(key string) string { - if v, ok := r.props[key]; ok { - return v - } - return "" -} - -type testHeaderSource struct { - headers map[eacl.FilterHeaderType][]eacl.Header -} - -func (s *testHeaderSource) HeadersOfType(t eacl.FilterHeaderType) ([]eacl.Header, bool) { - v, ok := s.headers[t] - return v, ok -} - -type testHeader struct { - key, value string -} - -func (h *testHeader) Key() string { return h.key } -func (h *testHeader) Value() string { return h.value } diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go deleted file mode 100644 index 6f114d45b..000000000 --- a/pkg/util/ape/parser.go +++ /dev/null @@ -1,321 +0,0 @@ -package ape - -import ( - "errors" - "fmt" - "os" - "strings" - - apechain 
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/flynn-archive/go-shlex" -) - -var ( - errInvalidStatementFormat = errors.New("invalid statement format") - errInvalidConditionFormat = errors.New("invalid condition format") - errUnknownStatus = errors.New("status is not recognized") - errUnknownStatusDetail = errors.New("status detail is not recognized") - errUnknownAction = errors.New("action is not recognized") - errUnknownBinaryOperator = errors.New("binary operator is not recognized") - errUnknownCondObjectType = errors.New("condition object type is not recognized") - errMixedTypesInRule = errors.New("found mixed type of actions in rule") - errNoActionsInRule = errors.New("there are no actions in rule") - errUnsupportedResourceFormat = errors.New("unsupported resource format") - errFailedToParseAllAny = errors.New("any/all is not parsed") -) - -func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error { - data, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("read file <%s>: %w", path, err) - } - - err = chain.UnmarshalBinary(data) - if err != nil { - err = chain.UnmarshalJSON(data) - if err != nil { - return fmt.Errorf("invalid format: %w", err) - } - } - - return nil -} - -// ParseAPEChain parses APE chain rules. -func ParseAPEChain(chain *apechain.Chain, rules []string) error { - if len(rules) == 0 { - return errors.New("no APE rules provided") - } - - for _, rule := range rules { - r := new(apechain.Rule) - if err := ParseAPERule(r, rule); err != nil { - return err - } - chain.Rules = append(chain.Rules, *r) - } - - return nil -} - -// ParseAPERule parses access-policy-engine statement from the following form: -// [:status_detail] ... [...] ... 
-//
-// Examples:
-// deny Object.Put *
-// deny:QuotaLimitReached Object.Put *
-// allow Object.Put *
-// allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.Get any ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.Get all ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.* *
-// allow Container.* *
-//
-//nolint:godot
-func ParseAPERule(r *apechain.Rule, rule string) error {
-	lexemes, err := shlex.Split(rule)
-	if err != nil {
-		return fmt.Errorf("can't parse rule '%s': %v", rule, err)
-	}
-	return parseRuleLexemes(r, lexemes)
-}
-
-func unique(inputSlice []string) []string {
-	uniqueSlice := make([]string, 0, len(inputSlice))
-	seen := make(map[string]bool, len(inputSlice))
-	for _, element := range inputSlice {
-		if !seen[element] {
-			uniqueSlice = append(uniqueSlice, element)
-			seen[element] = true
-		}
-	}
-	return uniqueSlice
-}
-
-func parseRuleLexemes(r *apechain.Rule, lexemes []string) error {
-	if len(lexemes) < 2 {
-		return errInvalidStatementFormat
-	}
-
-	var err error
-	r.Status, err = parseStatus(lexemes[0])
-	if err != nil {
-		return err
-	}
-
-	var objectTargeted bool
-	var containerTargeted bool
-
-	for i, lexeme := range lexemes[1:] {
-		anyExpr, anyErr := parseAnyAll(lexeme)
-		if anyErr == nil {
-			r.Any = anyExpr
-			continue
-		}
-
-		var names []string
-		var actionType bool
-		names, actionType, err = parseAction(lexeme)
-		if err != nil {
-			condition, errCond := parseCondition(lexeme)
-			if errCond != nil {
-				err = fmt.Errorf("%w:%w", err, errCond)
-				lexemes = lexemes[i+1:]
-				break
-			}
-			r.Condition = append(r.Condition, *condition)
-		} else {
-			if actionType {
-				objectTargeted = true
-			} else {
-				containerTargeted = true
-			}
-			if objectTargeted && containerTargeted {
-				// Actually, an APE chain allows defining rules for several resources, for example, if
-				// the chain target is a namespace, but the parser primitively compiles verbs,
-				// conditions and resources into one rule. So, for the parser, one rule relates only to
-				// one resource type - object or container.
-				return errMixedTypesInRule
-			}
-
-			r.Actions.Names = append(r.Actions.Names, names...)
- } - } - r.Actions.Names = unique(r.Actions.Names) - if len(r.Actions.Names) == 0 { - return fmt.Errorf("%w:%w", err, errNoActionsInRule) - } - for _, lexeme := range lexemes { - resource, errRes := parseResource(lexeme, objectTargeted) - if errRes != nil { - return fmt.Errorf("%w:%w", err, errRes) - } - r.Resources.Names = append(r.Resources.Names, resource) - } - - return nil -} - -func parseAnyAll(lexeme string) (bool, error) { - switch strings.ToLower(lexeme) { - case "any": - return true, nil - case "all": - return false, nil - default: - return false, errFailedToParseAllAny - } -} - -func parseStatus(lexeme string) (apechain.Status, error) { - action, expression, found := strings.Cut(lexeme, ":") - switch strings.ToLower(action) { - case "deny": - if !found { - return apechain.AccessDenied, nil - } - if strings.EqualFold(expression, "QuotaLimitReached") { - return apechain.QuotaLimitReached, nil - } - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) - case "allow": - if found { - return 0, errUnknownStatusDetail - } - return apechain.Allow, nil - default: - return 0, errUnknownStatus - } -} - -func parseAction(lexeme string) ([]string, bool, error) { - switch strings.ToLower(lexeme) { - case "object.put": - return []string{nativeschema.MethodPutObject}, true, nil - case "object.get": - return []string{nativeschema.MethodGetObject}, true, nil - case "object.head": - return []string{nativeschema.MethodHeadObject}, true, nil - case "object.delete": - return []string{nativeschema.MethodDeleteObject}, true, nil - case "object.search": - return []string{nativeschema.MethodSearchObject}, true, nil - case "object.range": - return []string{nativeschema.MethodRangeObject}, true, nil - case "object.hash": - return []string{nativeschema.MethodHashObject}, true, nil - case "object.patch": - return []string{nativeschema.MethodPatchObject}, true, nil - case "object.*": - return []string{ - nativeschema.MethodPutObject, - nativeschema.MethodGetObject, - nativeschema.MethodHeadObject, - nativeschema.MethodDeleteObject, - nativeschema.MethodSearchObject, - nativeschema.MethodRangeObject, - nativeschema.MethodHashObject, - nativeschema.MethodPatchObject, - }, true, nil - case "container.put": - return []string{nativeschema.MethodPutContainer}, false, nil - case "container.delete": - return []string{nativeschema.MethodDeleteContainer}, false, nil - case "container.get": - return []string{nativeschema.MethodGetContainer}, false, nil - case "container.list": - return []string{nativeschema.MethodListContainers}, false, nil - case "container.*": - return []string{ - nativeschema.MethodPutContainer, - nativeschema.MethodDeleteContainer, - nativeschema.MethodGetContainer, - nativeschema.MethodListContainers, - }, false, nil - default: - } - return nil, false, fmt.Errorf("%w: %s", errUnknownAction, lexeme) -} - -func parseResource(lexeme string, isObj bool) (string, error) { - if len(lexeme) > 0 && !strings.HasSuffix(lexeme, "/") { - if isObj { - if lexeme == "*" { - return nativeschema.ResourceFormatAllObjects, nil - } else if lexeme == "/*" || lexeme == "root/*" { - return nativeschema.ResourceFormatRootObjects, nil - } else if strings.HasPrefix(lexeme, "/") { - lexeme = lexeme[1:] - delimCount := strings.Count(lexeme, "/") - if delimCount == 1 && len(lexeme) >= 3 { // container/object - return nativeschema.ObjectPrefix + "//" + lexeme, nil - } - } else { - delimCount := strings.Count(lexeme, "/") - if delimCount == 1 && len(lexeme) >= 3 || - delimCount == 2 && len(lexeme) >= 5 { // 
namespace/container/object - return nativeschema.ObjectPrefix + "/" + lexeme, nil - } - } - } else { - if lexeme == "*" { - return nativeschema.ResourceFormatAllContainers, nil - } else if lexeme == "/*" || lexeme == "root/*" { - return nativeschema.ResourceFormatRootContainers, nil - } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 { - lexeme = lexeme[1:] - delimCount := strings.Count(lexeme, "/") - if delimCount == 0 { - return nativeschema.ContainerPrefix + "//" + lexeme, nil - } - } else { - delimCount := strings.Count(lexeme, "/") - if delimCount == 1 && len(lexeme) > 3 { // namespace/container - return nativeschema.ContainerPrefix + "/" + lexeme, nil - } - } - } - } - return "", errUnsupportedResourceFormat -} - -const ( - ResourceCondition = "resourcecondition" - RequestCondition = "requestcondition" -) - -var typeToCondKindType = map[string]apechain.ConditionKindType{ - ResourceCondition: apechain.KindResource, - RequestCondition: apechain.KindRequest, -} - -func parseCondition(lexeme string) (*apechain.Condition, error) { - typ, expression, found := strings.Cut(lexeme, ":") - typ = strings.ToLower(typ) - - condKindType, ok := typeToCondKindType[typ] - if ok { - if !found { - return nil, fmt.Errorf("%w: %s", errInvalidConditionFormat, lexeme) - } - - var cond apechain.Condition - cond.Kind = condKindType - - lhs, rhs, binExpFound := strings.Cut(expression, "!=") - if !binExpFound { - lhs, rhs, binExpFound = strings.Cut(expression, "=") - if !binExpFound { - return nil, fmt.Errorf("%w: %s", errUnknownBinaryOperator, expression) - } - cond.Op = apechain.CondStringEquals - } else { - cond.Op = apechain.CondStringNotEquals - } - - cond.Key, cond.Value = lhs, rhs - return &cond, nil - } - return nil, fmt.Errorf("%w: %s", errUnknownCondObjectType, typ) -} diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go deleted file mode 100644 index c236c4603..000000000 --- a/pkg/util/ape/parser_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package ape - -import ( - "fmt" - "testing" - - policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/stretchr/testify/require" -) - -func TestParseAPERule(t *testing.T) { - tests := [...]struct { - name string - rule string - expectErr error - expectRule policyengine.Rule - }{ - { - name: "Valid allow rule for all objects", - rule: "allow Object.Put *", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}}, - }, - }, - { - name: "Valid rule for all objects in implicit root namespace", - rule: "allow Object.Put /*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, - }, - }, - { - name: "Valid rule for all objects in explicit root namespace", - rule: "allow Object.Put root/*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, - }, - }, - { - name: "Valid rule for all containers in explicit root namespace", - rule: "allow Container.Put root/*", - expectRule: policyengine.Rule{ - Status: 
policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}}, - }, - }, - { - name: "Valid rule for all objects in root namespace and container", - rule: "allow Object.Put /cid/*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, "cid"), - }}, - }, - }, - { - name: "Valid rule for object in root namespace and container", - rule: "allow Object.Put /cid/oid", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, "cid", "oid"), - }}, - }, - }, - { - name: "Valid rule for all objects in namespace", - rule: "allow Object.Put ns/*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceObjects, "ns"), - }}, - }, - }, - { - name: "Valid rule for all objects in namespace and container", - rule: "allow Object.Put ns/cid/*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, "ns", "cid"), - }}, - }, - }, - { - name: "Valid rule for object in namespace and container", - rule: "allow Object.Put ns/cid/oid", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, "ns", "cid", "oid"), - }}, - }, - }, - { - name: "Valid deny rule", - rule: "deny Object.Put *", - expectRule: policyengine.Rule{ - Status: policyengine.AccessDenied, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}}, - }, - }, - { - name: "Valid deny rule with action detail", - rule: "deny:QuotaLimitReached Object.Put *", - expectRule: policyengine.Rule{ - Status: policyengine.QuotaLimitReached, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}}, - }, - }, - { - name: "Valid allow rule with conditions", - rule: "allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}}, - Condition: []policyengine.Condition{ - { - Op: policyengine.CondStringEquals, - Kind: policyengine.KindResource, - Key: "Department", - Value: "HR", - }, - { - Op: policyengine.CondStringNotEquals, - Kind: policyengine.KindRequest, - Key: "Actor", - Value: "ownerA", - }, - }, - }, - }, - { - name: "Valid rule for object with conditions 
with action detail", - rule: "deny:QuotaLimitReached Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *", - expectRule: policyengine.Rule{ - Status: policyengine.QuotaLimitReached, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}}, - Condition: []policyengine.Condition{ - { - Op: policyengine.CondStringEquals, - Kind: policyengine.KindResource, - Key: "Department", - Value: "HR", - }, - { - Op: policyengine.CondStringNotEquals, - Kind: policyengine.KindRequest, - Key: "Actor", - Value: "ownerA", - }, - }, - }, - }, - { - name: "Invalid rule with unknown status", - rule: "permit Object.Put *", - expectErr: errUnknownStatus, - }, - { - name: "Invalid rule with unknown action", - rule: "allow Object.PutOut *", - expectErr: errUnknownAction, - }, - { - name: "Invalid rule with unknown status detail", - rule: "deny:UnknownActionDetail Object.Put *", - expectErr: errUnknownStatusDetail, - }, - { - name: "Invalid rule with unknown condition binary operator", - rule: "deny Object.Put ResourceCondition:Department
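For orientation, here is a minimal sketch of driving the parser deleted above. The import alias apeutil and the example rule strings are illustrative; the package path mirrors the deleted pkg/util/ape location, and the rule grammar is the one documented on ParseAPERule.

package main

import (
	"fmt"
	"log"

	apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
)

func main() {
	chain := new(apechain.Chain)

	// One statement per string: status, action(s), optional conditions, resource.
	rules := []string{
		"allow Object.Get ResourceCondition:Department=HR *",
		"deny Object.Put /cid/*",
	}
	// ParseAPEChain appends one compiled Rule per statement to chain.Rules.
	if err := apeutil.ParseAPEChain(chain, rules); err != nil {
		log.Fatal(err) // e.g. a mistyped verb surfaces errUnknownAction
	}

	for _, r := range chain.Rules {
		fmt.Println(r.Status, r.Actions.Names, r.Resources.Names)
	}
}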
/etc/bash_completion.d/%s
-  MacOS:
-    $ %s completion bash > /usr/local/etc/bash_completion.d/%s
-
-Zsh:
-  If shell completion is not already enabled in your environment, you will need
-  to enable it. You can execute the following once:
-  $ echo "autoload -U compinit; compinit" >> ~/.zshrc
-
-  To load completions for each session, execute once:
-  $ %s completion zsh > "${fpath[1]}/_%s"
-
-  You will need to start a new shell for this setup to take effect.
-
-Fish:
-  $ %s completion fish | source
-
-  To load completions for each session, execute once:
-  $ %s completion fish > ~/.config/fish/completions/%s.fish
-`
-
-// Command returns the cobra command structure for the autocomplete routine.
-func Command(name string) *cobra.Command {
-	return &cobra.Command{
-		Use:   "completion [bash|zsh|fish|powershell]",
-		Short: "Generate completion script",
-		Long: fmt.Sprintf(longHelpTemplate,
-			name, name, name, name, name, name, name, name, name, name),
-		DisableFlagsInUseLine: true,
-		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
-		Args:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
-		Run: func(cmd *cobra.Command, args []string) {
-			switch args[0] {
-			case "bash":
-				_ = cmd.Root().GenBashCompletion(cmd.OutOrStdout())
-			case "zsh":
-				_ = cmd.Root().GenZshCompletion(cmd.OutOrStdout())
-			case "fish":
-				_ = cmd.Root().GenFishCompletion(cmd.OutOrStdout(), true)
-			case "powershell":
-				_ = cmd.Root().GenPowerShellCompletion(cmd.OutOrStdout())
-			}
-		},
-	}
-}
diff --git a/pkg/util/config/crypto.go b/pkg/util/config/crypto.go
deleted file mode 100644
index fbfa7b484..000000000
--- a/pkg/util/config/crypto.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package config
-
-import (
-	"errors"
-
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
-)
-
-// LoadAccount loads a NEP-6 wallet, then unlocks and returns the provided account.
-func LoadAccount(path, addr, password string) (*wallet.Account, error) {
-	w, err := wallet.NewWalletFromFile(path)
-	if err != nil {
-		return nil, err
-	}
-
-	var h util.Uint160
-	if addr == "" {
-		h = w.GetChangeAddress()
-		if h.Equals(util.Uint160{}) {
-			return nil, errors.New("can't find a suitable account in the wallet")
-		}
-	} else {
-		h, err = address.StringToUint160(addr)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	acc := w.GetAccount(h)
-	if acc == nil {
-		return nil, errors.New("account is missing")
-	}
-
-	if err := acc.Decrypt(password, keys.NEP2ScryptParams()); err != nil {
-		return nil, err
-	}
-
-	return acc, nil
-}
diff --git a/pkg/util/config/dir.go b/pkg/util/config/dir.go
deleted file mode 100644
index 0379fe268..000000000
--- a/pkg/util/config/dir.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package config
-
-import (
-	"fmt"
-	"os"
-	"path"
-
-	"github.com/spf13/viper"
-)
-
-// ReadConfigDir reads all config files from the provided directory in alphabetical order
-// and merges their content into the current viper configuration.
-func ReadConfigDir(v *viper.Viper, configDir string) error {
-	entries, err := os.ReadDir(configDir)
-	if err != nil {
-		return err
-	}
-
-	for _, entry := range entries {
-		if entry.IsDir() {
-			continue
-		}
-		ext := path.Ext(entry.Name())
-		if ext != ".yaml" && ext != ".yml" && ext != ".json" {
-			continue
-		}
-
-		if err = mergeConfig(v, path.Join(configDir, entry.Name())); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// mergeConfig reads a config file and merges its content into the current viper.
-func mergeConfig(v *viper.Viper, fileName string) error {
-	cv := viper.New()
-	cv.SetConfigFile(fileName)
-	err := cv.ReadInConfig()
-	if err != nil {
-		return fmt.Errorf("failed to read config: %w", err)
-	}
-	if err = v.MergeConfigMap(cv.AllSettings()); err != nil {
-		return fmt.Errorf("failed to merge config: %w", err)
-	}
-
-	return nil
-}
diff --git a/pkg/util/config/test/generate.go b/pkg/util/config/test/generate.go
deleted file mode 100644
index 63e286615..000000000
--- a/pkg/util/config/test/generate.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package configtest
-
-import (
-	"crypto/rand"
-	"os"
-	"path"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-type MarshalFunc = func(any) ([]byte, error)
-
-type ConfigFile struct {
-	filename string
-	content  map[string]any
-	marshal  func(any) ([]byte, error)
-}
-
-type DummyFile struct {
-	filename string
-	size     int
-}
-
-func NewConfigFile(filename string, content map[string]any, marshal MarshalFunc) ConfigFile {
-	return ConfigFile{
-		filename: filename,
-		content:  content,
-		marshal:  marshal,
-	}
-}
-
-func NewDummyFile(filename string, size int) DummyFile {
-	return DummyFile{
-		filename: filename,
-		size:     size,
-	}
-}
-
-func PrepareConfigFiles(t *testing.T, dir string, files []ConfigFile) {
-	for _, file := range files {
-		data, err := file.marshal(file.content)
-		require.NoError(t, err)
-
-		err = os.WriteFile(path.Join(dir, file.filename), data, 0o600)
-		require.NoError(t, err)
-	}
-}
-
-func PrepareDummyFiles(t *testing.T, dir string, files []DummyFile) {
-	for _, file := range files {
-		data := make([]byte, file.size)
-		_, _ = rand.Read(data)
-
-		err := os.WriteFile(path.Join(dir, file.filename), data, 0o600)
-		require.NoError(t, err)
-	}
-}
diff --git a/pkg/util/gendoc/gendoc.go b/pkg/util/gendoc/gendoc.go
deleted file mode 100644
index 8897bf1e5..000000000
--- a/pkg/util/gendoc/gendoc.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package gendoc
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"text/template"
-	"time"
-
-	"github.com/spf13/cobra"
-	"github.com/spf13/cobra/doc"
-	"github.com/spf13/pflag"
-)
-
-const (
-	gendocTypeFlag = "type"
-
-	gendocMarkdown = "md"
-	gendocMan      = "man"
-
-	depthFlag     = "depth"
-	extensionFlag = "extension"
-)
-
-// Options for doc generation.
-type Options struct {
-	// Parameters for man generation. By default use (1) section and `FrostFS` source.
-	ManHeader *doc.GenManHeader
-	// TypeFlag is the flag to use for type, without leading `--`.
-	// Do not use unless really necessary.
-	// Default: `type`.
-	TypeFlag string
-	// DepthFlag is the flag to use for depth, without leading `--`.
-	// Do not use unless really necessary.
-	// Default: `depth`.
-	DepthFlag string
-	// ExtensionFlag is the flag to use for extension, without leading `--`.
-	// Do not use unless really necessary.
-	// Default: `extension`.
-	ExtensionFlag string
-}
-
-func (o *Options) fillDefaults() {
-	if o.ManHeader == nil {
-		now := time.Now()
-		o.ManHeader = &doc.GenManHeader{
-			Section: "1",
-			Source:  "FrostFS",
-			Date:    &now,
-		}
-	}
-	if o.TypeFlag == "" {
-		o.TypeFlag = gendocTypeFlag
-	}
-	if o.DepthFlag == "" {
-		o.DepthFlag = depthFlag
-	}
-	if o.ExtensionFlag == "" {
-		o.ExtensionFlag = extensionFlag
-	}
-}
-
-// Command returns a command that generates user documentation for the argument.
-func Command(rootCmd *cobra.Command, opts Options) *cobra.Command {
-	opts.fillDefaults()
-
-	gendocCmd := &cobra.Command{
-		Use:   "gendoc <dir>",
-		Short: "Generate documentation for this command",
-		Long: `Generate documentation for this command. If the template is not provided,
-builtin cobra generator is used and each subcommand is placed in
-a separate file in the same directory.
-
-The last optional argument specifies the template to use with text/template.
-In this case there is a number of helper functions which can be used:
-  replace STR FROM TO -- same as strings.ReplaceAll
-  join ARRAY SEPARATOR -- same as strings.Join
-  split STR SEPARATOR -- same as strings.Split
-  fullUse CMD -- slice of all command names starting from the parent
-  listFlags CMD -- list of command flags
-`,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if len(args) == 0 {
-				_ = cmd.Usage()
-				os.Exit(1)
-			}
-
-			err := os.MkdirAll(args[0], os.ModePerm)
-			if err != nil {
-				return fmt.Errorf("can't create directory: %w", err)
-			}
-
-			if len(args) == 2 {
-				data, err := os.ReadFile(args[1])
-				if err != nil {
-					return fmt.Errorf("can't read the template '%s': %w", args[1], err)
-				}
-
-				return generateTemplate(cmd, rootCmd, args[0], data)
-			}
-
-			typ, _ := cmd.Flags().GetString(gendocTypeFlag)
-			switch typ {
-			case gendocMarkdown:
-				return doc.GenMarkdownTree(rootCmd, args[0])
-			case gendocMan:
-				return doc.GenManTree(rootCmd, opts.ManHeader, args[0])
-			default:
-				return errors.New("type must be 'md' or 'man'")
-			}
-		},
-	}
-
-	ff := gendocCmd.Flags()
-	ff.String(opts.TypeFlag, gendocMarkdown, "Type for the documentation ('md' or 'man')")
-	ff.Int(opts.DepthFlag, 1, "If template is specified, unify all commands starting from depth in a single file. Default: 1.")
-	ff.String(opts.ExtensionFlag, "", "If the template is specified, string to append to the output file names")
-	return gendocCmd
-}
-
-func generateTemplate(cmd *cobra.Command, rootCmd *cobra.Command, outDir string, tmpl []byte) error {
-	depth, _ := cmd.Flags().GetInt(depthFlag)
-	ext, _ := cmd.Flags().GetString(extensionFlag)
-
-	tm := template.New("doc")
-	tm.Funcs(template.FuncMap{
-		"replace":   strings.ReplaceAll,
-		"split":     strings.Split,
-		"join":      strings.Join,
-		"fullUse":   fullUse,
-		"listFlags": listFlags,
-	})
-
-	tm, err := tm.Parse(string(tmpl))
-	if err != nil {
-		return err
-	}
-
-	return visit(rootCmd, outDir, ext, depth, tm)
-}
-
-func visit(rootCmd *cobra.Command, outDir string, ext string, depth int, tm *template.Template) error {
-	if depth == 0 {
-		name := strings.Join(fullUse(rootCmd), "-")
-		name = strings.TrimSpace(name)
-		name = strings.ReplaceAll(name, " ", "-")
-		name = filepath.Join(outDir, name) + ext
-		f, err := os.Create(name)
-		if err != nil {
-			return fmt.Errorf("can't create file '%s': %w", name, err)
-		}
-		defer f.Close()
-		return tm.Execute(f, rootCmd)
-	}
-
-	for _, c := range rootCmd.Commands() {
-		err := visit(c, outDir, ext, depth-1, tm)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func fullUse(c *cobra.Command) []string {
-	if c == nil {
-		return nil
-	}
-	return append(fullUse(c.Parent()), c.Name())
-}
-
-func listFlags(c *cobra.Command) []*pflag.Flag {
-	var res []*pflag.Flag
-	c.Flags().VisitAll(func(f *pflag.Flag) {
-		res = append(res, f)
-	})
-	return res
-}
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
deleted file mode 100644
index 8569ec734..000000000
--- a/pkg/util/http/calls.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package httputil
-
-import (
-	"context"
-	"errors"
-	"net/http"
-)
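As a usage note for the httputil wrapper whose Serve and Shutdown methods follow, here is a minimal sketch. The address, handler, and timeout are illustrative, and the import path mirrors the deleted pkg/util/http location.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)

func main() {
	srv := httputil.New(
		httputil.HTTPSrvPrm{
			Address: ":8080",              // must be a valid TCP address
			Handler: http.DefaultServeMux, // e.g. the profiling mux exposed by Handler()
		},
		httputil.WithShutdownTimeout(5*time.Second),
	)

	go func() {
		// Serve returns nil after a graceful Shutdown.
		if err := srv.Serve(); err != nil {
			log.Println(err)
		}
	}()

	// ... later, on a termination signal:
	_ = srv.Shutdown(context.Background())
}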
-
-// Serve listens and serves the internal HTTP server.
-//
-// Returns any error returned by the internal server
-// except http.ErrServerClosed.
-//
-// After Shutdown call, Serve has no effect and
-// the returned error is always nil.
-func (x *Server) Serve() error {
-	err := x.srv.ListenAndServe()
-
-	// http.ErrServerClosed is returned on server shutdown
-	// so we ignore this error.
-	if err != nil && errors.Is(err, http.ErrServerClosed) {
-		err = nil
-	}
-
-	return err
-}
-
-// Shutdown gracefully shuts down the internal HTTP server.
-//
-// Shutdown is called with the context which expires after
-// the configured timeout.
-//
-// Once Shutdown has been called on a server, it may not be reused;
-// future calls to Serve method will have no effect.
-func (x *Server) Shutdown(ctx context.Context) error {
-	ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
-
-	err := x.srv.Shutdown(ctx)
-
-	cancel()
-
-	return err
-}
diff --git a/pkg/util/http/opts.go b/pkg/util/http/opts.go
deleted file mode 100644
index 852cdf117..000000000
--- a/pkg/util/http/opts.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package httputil
-
-import (
-	"time"
-)
-
-// Option sets an optional parameter of Server.
-type Option func(*cfg)
-
-type cfg struct {
-	shutdownTimeout time.Duration
-}
-
-func defaultCfg() *cfg {
-	return &cfg{
-		shutdownTimeout: 15 * time.Second,
-	}
-}
-
-// WithShutdownTimeout returns an option to set the shutdown timeout
-// of the internal HTTP server.
-func WithShutdownTimeout(dur time.Duration) Option {
-	return func(c *cfg) {
-		c.shutdownTimeout = dur
-	}
-}
diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go
deleted file mode 100644
index f85fd2ea9..000000000
--- a/pkg/util/http/pprof.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package httputil
-
-import (
-	"net/http"
-	"net/http/pprof"
-
-	"github.com/felixge/fgprof"
-)
-
-func init() {
-	http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
-}
-
-// initializes the pprof package in order to
-// register its profiling handlers on http.DefaultServeMux.
-var _ = pprof.Handler("")
-
-// Handler returns the http.Handler serving the
-// registered profiling endpoints.
-func Handler() http.Handler {
-	return http.DefaultServeMux
-}
diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go
deleted file mode 100644
index 2589ab786..000000000
--- a/pkg/util/http/server.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package httputil
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// HTTPSrvPrm groups the required parameters of the Server's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type HTTPSrvPrm struct {
-	// TCP address for the server to listen on.
-	//
-	// Must be a valid TCP address.
-	Address string
-
-	// Must not be nil.
-	Handler http.Handler
-}
-
-// Server represents a wrapper over http.Server
-// that provides an interface to start and stop
-// a listening routine.
-//
-// For correct operation, Server must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Server is immediately ready to work through its API.
-type Server struct { - shutdownTimeout time.Duration - - srv *http.Server -} - -const invalidValFmt = "invalid %s %s (%T): %v" - -func panicOnPrmValue(n string, v any) { - panicOnValue("parameter", n, v) -} - -func panicOnOptValue(n string, v any) { - panicOnValue("option", n, v) -} - -func panicOnValue(t, n string, v any) { - panic(fmt.Sprintf(invalidValFmt, t, n, v, v)) -} - -func checkSrvPrm(addr string, handler http.Handler) { - switch { - case addr == "": - panicOnPrmValue("Address", addr) - case handler == nil: - panicOnPrmValue("Handler", handler) - } -} - -// New creates a new instance of the Server. -// -// Panics if at least one value of the parameters is invalid. -// -// Panics if at least one of next optional parameters is invalid: -// - shutdown timeout is non-positive. -// -// The created Server does not require additional -// initialization and is completely ready for work. -func New(prm HTTPSrvPrm, opts ...Option) *Server { - checkSrvPrm(prm.Address, prm.Handler) - - c := defaultCfg() - - for _, o := range opts { - o(c) - } - - if c.shutdownTimeout <= 0 { - panicOnOptValue("shutdown timeout", c.shutdownTimeout) - } - - return &Server{ - shutdownTimeout: c.shutdownTimeout, - srv: &http.Server{ - Addr: prm.Address, - Handler: prm.Handler, - }, - } -} - -// NewHTTPSrvPrm creates a new instance of the HTTPSrvPrm. -// -// Panics if at least one value of the parameters is invalid. -func NewHTTPSrvPrm(addr string, handler http.Handler) *HTTPSrvPrm { - checkSrvPrm(addr, handler) - return &HTTPSrvPrm{ - Address: addr, - Handler: handler, - } -} diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go deleted file mode 100644 index 6337039a9..000000000 --- a/pkg/util/keyer/dashboard.go +++ /dev/null @@ -1,120 +0,0 @@ -package keyer - -import ( - "encoding/hex" - "fmt" - "os" - "text/tabwriter" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "github.com/mr-tron/base58" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type ( - Dashboard struct { - privKey *keys.PrivateKey - pubKey *keys.PublicKey - scriptHash3 util.Uint160 - - multisigKeys keys.PublicKeys - } -) - -func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { - var ( - data []byte - - privKey, pubKey, wif, wallet3, sh3, shBE3, multiSigAddr string - ) - - w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) - - if d.privKey != nil { - privKey = d.privKey.String() - - wif = d.privKey.WIF() - if useHex { - wif = base58ToHex(wif) - } - } - - if d.pubKey != nil { - if uncompressed { - data = d.pubKey.UncompressedBytes() - } else { - data = d.pubKey.Bytes() - } - - pubKey = hex.EncodeToString(data) - } - - if !d.scriptHash3.Equals(util.Uint160{}) { - sh3 = d.scriptHash3.StringLE() - shBE3 = d.scriptHash3.StringBE() - wallet3 = address.Uint160ToString(d.scriptHash3) - - if useHex { - wallet3 = base58ToHex(wallet3) - } - } - - if len(d.multisigKeys) != 0 { - u160, err := scriptHashFromMultikey(d.multisigKeys) - if err != nil { - panic("can't create multisig redeem script") - } - - multiSigAddr = address.Uint160ToString(u160) - } - - if privKey != "" { - fmt.Fprintf(w, "PrivateKey\t%s\n", privKey) - } - - if pubKey != "" { - fmt.Fprintf(w, "PublicKey\t%s\n", pubKey) - } - - if wif != "" { - fmt.Fprintf(w, "WIF\t%s\n", wif) - } - - if wallet3 != "" { - fmt.Fprintf(w, "Wallet3.0\t%s\n", wallet3) - } - - 
if sh3 != "" { - fmt.Fprintf(w, "ScriptHash3.0\t%s\n", sh3) - } - - if shBE3 != "" { - fmt.Fprintf(w, "ScriptHash3.0BE\t%s\n", shBE3) - } - - if multiSigAddr != "" { - fmt.Fprintf(w, "MultiSigAddress\t%s\n", multiSigAddr) - } - - w.Flush() -} - -func base58ToHex(data string) string { - val, err := base58.Decode(data) - assert.NoError(err, "produced incorrect base58 value") - - return hex.EncodeToString(val) -} - -func scriptHashFromMultikey(k keys.PublicKeys) (util.Uint160, error) { - script, err := smartcontract.CreateDefaultMultiSigRedeemScript(k) - if err != nil { - return util.Uint160{}, fmt.Errorf("can't create multisig redeem script: %w", err) - } - - return hash.Hash160(script), nil -} diff --git a/pkg/util/keyer/parser.go b/pkg/util/keyer/parser.go deleted file mode 100644 index 4023e3981..000000000 --- a/pkg/util/keyer/parser.go +++ /dev/null @@ -1,118 +0,0 @@ -package keyer - -import ( - "crypto/elliptic" - "encoding/hex" - "errors" - "fmt" - "strings" - - "github.com/mr-tron/base58" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -const ( - NeoPrivateKeySize = 32 - - scriptHashSize = 20 - addressSize = 25 - publicKeyCompressedSize = 33 - wifSize = 38 - publicKeyUncompressedSize = 65 -) - -var errInputType = errors.New("unknown input type") - -func (d *Dashboard) ParseString(data string) error { - // just in case remove 0x prefixes if there are some - data = strings.TrimPrefix(data, "0x") - data = strings.TrimPrefix(data, "0X") - - var ( - rawData []byte - err error - ) - - // data could be encoded in base58 or hex formats, try both - rawData, err = hex.DecodeString(data) - if err != nil { - rawData, err = base58.Decode(data) - if err != nil { - return fmt.Errorf("data is not hex or base58 encoded: %w", err) - } - } - - return d.ParseBinary(rawData) -} - -func (d *Dashboard) ParseBinary(data []byte) error { - var err error - - switch len(data) { - case NeoPrivateKeySize: - d.privKey, err = keys.NewPrivateKeyFromBytes(data) - if err != nil { - return fmt.Errorf("can't parse private key: %w", err) - } - case wifSize: - d.privKey, err = keys.NewPrivateKeyFromWIF(base58.Encode(data)) - if err != nil { - return fmt.Errorf("can't parse WIF: %w", err) - } - case publicKeyCompressedSize, publicKeyUncompressedSize: - d.pubKey, err = keys.NewPublicKeyFromBytes(data, elliptic.P256()) - if err != nil { - return fmt.Errorf("can't parse public key: %w", err) - } - case addressSize: - d.scriptHash3, err = address.StringToUint160(base58.Encode(data)) - if err != nil { - return fmt.Errorf("can't parse address: %w", err) - } - case scriptHashSize: - sc, err := util.Uint160DecodeBytesLE(data) - if err != nil { - return fmt.Errorf("can't parse script hash: %w", err) - } - - d.scriptHash3 = sc - default: - return errInputType - } - - d.fill() - - return nil -} - -func (d *Dashboard) ParseMultiSig(data []string) error { - d.multisigKeys = make(keys.PublicKeys, 0, len(data)) - - for i := range data { - data, err := hex.DecodeString(data[i]) - if err != nil { - return fmt.Errorf("pass only hex encoded public keys: %w", err) - } - - key, err := keys.NewPublicKeyFromBytes(data, elliptic.P256()) - if err != nil { - return fmt.Errorf("pass only hex encoded public keys: %w", err) - } - - d.multisigKeys = append(d.multisigKeys, key) - } - - return nil -} - -func (d *Dashboard) fill() { - if d.privKey != nil { - d.pubKey = d.privKey.PublicKey() - } - - if d.pubKey != nil { - d.scriptHash3 = 
d.pubKey.GetScriptHash() - } -} diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go deleted file mode 100644 index 413b1d9aa..000000000 --- a/pkg/util/logger/log.go +++ /dev/null @@ -1,35 +0,0 @@ -package logger - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "go.uber.org/zap" -) - -func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Debug(msg, appendContext(ctx, fields...)...) -} - -func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Info(msg, appendContext(ctx, fields...)...) -} - -func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Warn(msg, appendContext(ctx, fields...)...) -} - -func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Error(msg, appendContext(ctx, fields...)...) -} - -func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - fields = append(fields, zap.String("trace_id", traceID)) - } - if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined { - fields = append(fields, zap.String("io_tag", ioTag)) - } - return fields -} diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go deleted file mode 100644 index a1998cb1a..000000000 --- a/pkg/util/logger/logger.go +++ /dev/null @@ -1,243 +0,0 @@ -package logger - -import ( - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/zapjournald" - "github.com/ssgreg/journald" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Logger represents a component -// for writing messages to log. -type Logger struct { - z *zap.Logger - c zapcore.Core - t Tag - w bool -} - -// Prm groups Logger's parameters. -// Successful passing non-nil parameters to the NewLogger (if returned -// error is nil) connects the parameters with the returned Logger. -// Parameters that have been connected to the Logger support its -// configuration changing. -// -// See also Logger.Reload, SetLevelString. -type Prm struct { - // support runtime rereading - level zapcore.Level - - // SamplingHook hook for the zap.Logger - SamplingHook func(e zapcore.Entry, sd zapcore.SamplingDecision) - - // do not support runtime rereading - dest string - - // PrependTimestamp specifies whether to prepend a timestamp in the log - PrependTimestamp bool - - // Options for zap.Logger - Options []zap.Option - - // map of tag's bit masks to log level, overrides lvl - tl map[Tag]zapcore.Level -} - -const ( - DestinationUndefined = "" - DestinationStdout = "stdout" - DestinationJournald = "journald" -) - -// SetLevelString sets the minimum logging level. Default is -// "info". -// -// Returns an error if s is not a string representation of a -// supporting logging level. -// -// Supports runtime rereading. -func (p *Prm) SetLevelString(s string) error { - return p.level.UnmarshalText([]byte(s)) -} - -func (p *Prm) SetDestination(d string) error { - if d != DestinationStdout && d != DestinationJournald { - return fmt.Errorf("invalid logger destination %s", d) - } - if p != nil { - p.dest = d - } - return nil -} - -// SetTags parses list of tags with log level. -func (p *Prm) SetTags(tags [][]string) (err error) { - p.tl, err = parseTags(tags) - return err -} - -// NewLogger constructs a new zap logger instance. Constructing with nil -// parameters is safe: default values will be used then. 
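Rounding off the keyer package above: a Dashboard is normally driven by parsing one piece of key material and printing every form derivable from it (the input length decides whether it is treated as a private key, WIF, public key, address or script hash, and fill() derives the rest). A minimal sketch:

```go
package main

import (
	"os"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/keyer"
)

func main() {
	if len(os.Args) < 2 {
		os.Exit(1)
	}

	var d keyer.Dashboard

	// Accepts hex or base58 input, with or without a 0x prefix.
	if err := d.ParseString(os.Args[1]); err != nil {
		os.Exit(1)
	}

	// Compressed public key, base58 output.
	d.PrettyPrint(false, false)
}
```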
-// Passing non-nil parameters after a successful creation (non-error) allows -// runtime reconfiguration. -// -// Logger is built from production logging configuration with: -// - parameterized level; -// - console encoding; -// - ISO8601 time encoding. -// -// Logger records a stack trace for all messages at or above fatal level. -func NewLogger(prm Prm) (*Logger, error) { - switch prm.dest { - case DestinationUndefined, DestinationStdout: - return newConsoleLogger(prm) - case DestinationJournald: - return newJournaldLogger(prm) - default: - return nil, fmt.Errorf("unknown destination %s", prm.dest) - } -} - -func newConsoleLogger(prm Prm) (*Logger, error) { - c := zap.NewProductionConfig() - c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) - c.Encoding = "console" - if prm.SamplingHook != nil { - c.Sampling.Hook = prm.SamplingHook - } - - if prm.PrependTimestamp { - c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - } else { - c.EncoderConfig.TimeKey = "" - } - - opts := []zap.Option{ - zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), - zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap, err := c.Build(opts...) - if err != nil { - return nil, err - } - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) - - return l, nil -} - -func newJournaldLogger(prm Prm) (*Logger, error) { - c := zap.NewProductionConfig() - if prm.SamplingHook != nil { - c.Sampling.Hook = prm.SamplingHook - } - - if prm.PrependTimestamp { - c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - } else { - c.EncoderConfig.TimeKey = "" - } - - encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - - core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) - coreWithContext := core.With([]zapcore.Field{ - zapjournald.SyslogFacility(zapjournald.LogDaemon), - zapjournald.SyslogIdentifier(), - zapjournald.SyslogPid(), - }) - - var samplerOpts []zapcore.SamplerOption - if c.Sampling.Hook != nil { - samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook)) - } - samplingCore := zapcore.NewSamplerWithOptions( - coreWithContext, - time.Second, - c.Sampling.Initial, - c.Sampling.Thereafter, - samplerOpts..., - ) - opts := []zap.Option{ - zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), - zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap := zap.New(samplingCore, opts...) - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) - - return l, nil -} - -// With create a child logger with new fields, don't affect the parent. -// Throws panic if tag is unset. -func (l *Logger) With(fields ...zap.Field) *Logger { - if l.t == 0 { - panic("tag is unset") - } - c := *l - c.z = l.z.With(fields...) - // With called under the logger - c.w = true - return &c -} - -type core struct { - c zapcore.Core - l zap.AtomicLevel -} - -func (c *core) Enabled(lvl zapcore.Level) bool { - return c.l.Enabled(lvl) -} - -func (c *core) With(fields []zapcore.Field) zapcore.Core { - clone := *c - clone.c = clone.c.With(fields) - return &clone -} - -func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return c.c.Check(e, ce) -} - -func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { - return c.c.Write(e, fields) -} - -func (c *core) Sync() error { - return c.c.Sync() -} - -// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger. 
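The construction path above, together with WithTag just below, is normally driven through Prm. A sketch, assuming the standard import path; the tag list and field values are illustrative:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

func main() {
	var prm logger.Prm
	_ = prm.SetLevelString("info") // default level for all tags
	_ = prm.SetDestination(logger.DestinationStdout)
	// Each entry is {comma-separated tag list, level}; unlisted tags
	// keep the default level once UpdateLevelForTags is applied.
	_ = prm.SetTags([][]string{{"shard,gc", "debug"}})

	log, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm)

	// WithTag must come before With: With marks the logger as derived
	// and a later WithTag would panic.
	shardLog := log.WithTag(logger.TagShard).With(zap.String("shard_id", "s01"))
	shardLog.Debug(context.Background(), "enabled via tag override")
}
```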
-// Throws panic if provided unsupported tag. -func (l *Logger) WithTag(tag Tag) *Logger { - if tag == 0 || tag > Tag(len(_Tag_index)-1) { - panic("unsupported tag " + tag.String()) - } - if l.w { - panic("unsupported operation for the logger's state") - } - c := *l - c.t = tag - c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { - return &core{ - c: l.c.With([]zap.Field{zap.String("tag", tag.String())}), - l: tagToLogLevel[tag], - } - })) - return &c -} - -func NewLoggerWrapper(z *zap.Logger) *Logger { - return &Logger{ - z: z.WithOptions(zap.AddCallerSkip(1)), - t: TagMain, - c: z.Core(), - } -} diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go deleted file mode 100644 index b867ee6cc..000000000 --- a/pkg/util/logger/logger_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logger - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest/observer" -) - -func BenchmarkLogger(b *testing.B) { - ctx := context.Background() - m := map[string]Prm{} - - prm := Prm{} - require.NoError(b, prm.SetLevelString("debug")) - m["logging enabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - m["logging disabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}})) - m["logging enabled via tags"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("debug")) - require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}})) - m["logging disabled via tags"] = prm - - for k, v := range m { - b.Run(k, func(b *testing.B) { - logger, err := createLogger(v) - require.NoError(b, err) - UpdateLevelForTags(v) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - logger.Info(ctx, "test info") - } - }) - } -} - -type testCore struct { - core zapcore.Core -} - -func (c *testCore) Enabled(lvl zapcore.Level) bool { - return c.core.Enabled(lvl) -} - -func (c *testCore) With(fields []zapcore.Field) zapcore.Core { - c.core = c.core.With(fields) - return c -} - -func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return ce.AddCore(e, c) -} - -func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error { - return nil -} - -func (c *testCore) Sync() error { - return c.core.Sync() -} - -func createLogger(prm Prm) (*Logger, error) { - prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { - tc := testCore{core: core} - return &tc - })} - return NewLogger(prm) -} - -func TestLoggerOutput(t *testing.T) { - obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel)) - - prm := Prm{} - require.NoError(t, prm.SetLevelString("debug")) - prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { - return obs - })} - loggerMain, err := NewLogger(prm) - require.NoError(t, err) - UpdateLevelForTags(prm) - - loggerMainWith := loggerMain.With(zap.String("key", "value")) - - require.Panics(t, func() { - loggerMainWith.WithTag(TagShard) - }) - loggerShard := loggerMain.WithTag(TagShard) - loggerShard = loggerShard.With(zap.String("key1", "value1")) - - loggerMorph := loggerMain.WithTag(TagMorph) - loggerMorph = loggerMorph.With(zap.String("key2", "value2")) - - ctx := context.Background() - loggerMain.Debug(ctx, "main") - loggerMainWith.Debug(ctx, "main with") - loggerShard.Debug(ctx, "shard") - loggerMorph.Debug(ctx, "morph") - - require.Len(t, logs.All(), 
4) - require.Len(t, logs.FilterFieldKey("key").All(), 1) - require.Len(t, logs.FilterFieldKey("key1").All(), 1) - require.Len(t, logs.FilterFieldKey("key2").All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) - require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) -} diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result deleted file mode 100644 index 612fa2967..000000000 --- a/pkg/util/logger/logger_test.result +++ /dev/null @@ -1,46 +0,0 @@ -goos: linux -goarch: amd64 -pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger -cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz -BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op 
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op -PASS -ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s diff --git a/pkg/util/logger/metrics.go b/pkg/util/logger/metrics.go deleted file mode 100644 index 7e62e6383..000000000 --- a/pkg/util/logger/metrics.go +++ /dev/null @@ -1,48 +0,0 @@ -package logger - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap/zapcore" -) - -const ( - logSubsystem = "logger" - logLevelLabel = "level" - logDroppedLabel = "dropped" -) - -type LogMetrics interface { - Inc(level zapcore.Level, dropped bool) - GetSamplingHook() func(e zapcore.Entry, sd zapcore.SamplingDecision) -} - -type logMetrics struct { - logCount *prometheus.CounterVec -} - -func NewLogMetrics(namespace string) LogMetrics { - return &logMetrics{ - logCount: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: logSubsystem, - Name: "entry_count", - Help: "Total log entries emitted or dropped by severity level", - }, []string{logLevelLabel, logDroppedLabel}), - } -} - -func (m *logMetrics) Inc(level zapcore.Level, dropped bool) { - m.logCount.With(prometheus.Labels{ - logLevelLabel: level.String(), - logDroppedLabel: strconv.FormatBool(dropped), - }).Inc() -} - -func (m *logMetrics) GetSamplingHook() func(zapcore.Entry, zapcore.SamplingDecision) { - return func(e zapcore.Entry, sd zapcore.SamplingDecision) { - m.Inc(e.Level, sd == zapcore.LogDropped) - } -} diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go deleted file mode 100644 index 1b98f2e62..000000000 --- a/pkg/util/logger/tag_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. - -package logger - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
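The LogMetrics type in metrics.go above exists precisely so its sampling hook can be plugged into the logger's Prm, counting emitted and dropped entries per level under the `<namespace>_logger_entry_count` family. A sketch of the wiring; the namespace value is purely illustrative:

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

func main() {
	m := logger.NewLogMetrics("frostfs") // metric: frostfs_logger_entry_count

	var prm logger.Prm
	_ = prm.SetLevelString("debug")
	// Every sampling decision increments the counter, labeled with the
	// entry's level and whether it was dropped by the sampler.
	prm.SamplingHook = m.GetSamplingHook()

	log, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm)

	log.Info(context.Background(), "sampled and counted")
}
```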
- var x [1]struct{} - _ = x[TagMain-1] - _ = x[TagMorph-2] - _ = x[TagGrpcSvc-3] - _ = x[TagIr-4] - _ = x[TagProcessor-5] - _ = x[TagEngine-6] - _ = x[TagBlobovnicza-7] - _ = x[TagBlobovniczaTree-8] - _ = x[TagBlobstor-9] - _ = x[TagFSTree-10] - _ = x[TagGC-11] - _ = x[TagShard-12] - _ = x[TagWriteCache-13] - _ = x[TagDeleteSvc-14] - _ = x[TagGetSvc-15] - _ = x[TagSearchSvc-16] - _ = x[TagSessionSvc-17] - _ = x[TagTreeSvc-18] - _ = x[TagPolicer-19] - _ = x[TagReplicator-20] -} - -const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" - -var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} - -func (i Tag) String() string { - i -= 1 - if i >= Tag(len(_Tag_index)-1) { - return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] -} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go deleted file mode 100644 index a5386707e..000000000 --- a/pkg/util/logger/tags.go +++ /dev/null @@ -1,94 +0,0 @@ -package logger - -import ( - "fmt" - "strings" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -//go:generate stringer -type Tag -linecomment - -type Tag uint8 - -const ( - _ Tag = iota // - TagMain // main - TagMorph // morph - TagGrpcSvc // grpcsvc - TagIr // ir - TagProcessor // processor - TagEngine // engine - TagBlobovnicza // blobovnicza - TagBlobovniczaTree // blobovniczatree - TagBlobstor // blobstor - TagFSTree // fstree - TagGC // gc - TagShard // shard - TagWriteCache // writecache - TagDeleteSvc // deletesvc - TagGetSvc // getsvc - TagSearchSvc // searchsvc - TagSessionSvc // sessionsvc - TagTreeSvc // treesvc - TagPolicer // policer - TagReplicator // replicator - - defaultLevel = zapcore.InfoLevel -) - -var ( - tagToLogLevel = map[Tag]zap.AtomicLevel{} - stringToTag = map[string]Tag{} -) - -func init() { - for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ { - tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel) - stringToTag[i.String()] = i - } -} - -// parseTags returns: -// - map(always instantiated) of tag to custom log level for that tag; -// - error if it occurred(map is empty). -func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) { - m := make(map[Tag]zapcore.Level) - if len(raw) == 0 { - return m, nil - } - for _, item := range raw { - str, level := item[0], item[1] - if len(level) == 0 { - // It is not necessary to parse tags without level, - // because default log level will be used. - continue - } - var l zapcore.Level - err := l.UnmarshalText([]byte(level)) - if err != nil { - return nil, err - } - tmp := strings.Split(str, ",") - for _, tagStr := range tmp { - tag, ok := stringToTag[strings.TrimSpace(tagStr)] - if !ok { - return nil, fmt.Errorf("unsupported tag %s", str) - } - m[tag] = l - } - } - return m, nil -} - -func UpdateLevelForTags(prm Prm) { - for k, v := range tagToLogLevel { - nk, ok := prm.tl[k] - if ok { - v.SetLevel(nk) - } else { - v.SetLevel(prm.level) - } - } -} diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go deleted file mode 100644 index b5b0a31eb..000000000 --- a/pkg/util/logger/test/logger.go +++ /dev/null @@ -1,20 +0,0 @@ -package test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest" -) - -// NewLogger creates a new logger. 
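Because tags.go above stores one shared zap.AtomicLevel per tag, UpdateLevelForTags retunes loggers that already exist, which makes it suitable for SIGHUP-style reloads. A sketch of that flow; the helper name reloadLevels is hypothetical:

```go
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// reloadLevels reapplies tag levels from a freshly parsed config;
// tags omitted from raw fall back to the default level in prm.
func reloadLevels(raw [][]string, level string) error {
	var prm logger.Prm
	if err := prm.SetLevelString(level); err != nil {
		return err
	}
	if err := prm.SetTags(raw); err != nil {
		return err
	}
	logger.UpdateLevelForTags(prm)
	return nil
}

func main() {
	// "writecache" and "gc" switch to debug, everything else stays info.
	_ = reloadLevels([][]string{{"writecache,gc", "debug"}}, "info")
}
```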
-func NewLogger(t testing.TB) *logger.Logger { - return logger.NewLoggerWrapper( - zaptest.NewLogger(t, - zaptest.Level(zapcore.DebugLevel), - zaptest.WrapOptions(zap.Development(), zap.AddCaller()), - ), - ) -} diff --git a/pkg/util/os.go b/pkg/util/os.go deleted file mode 100644 index 30e08a8c3..000000000 --- a/pkg/util/os.go +++ /dev/null @@ -1,10 +0,0 @@ -package util - -import "os" - -// MkdirAllX calls os.MkdirAll with the passed permissions -// but with +x for a user and a group. This makes the created -// dir openable regardless of the passed permissions. -func MkdirAllX(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm|0o110) -} diff --git a/pkg/util/precision/converter.go b/pkg/util/precision/converter.go deleted file mode 100644 index bd2948f0a..000000000 --- a/pkg/util/precision/converter.go +++ /dev/null @@ -1,93 +0,0 @@ -package precision - -import ( - "math" - "math/big" -) - -type ( - // Converter is a cached wrapper on `convert` function. It caches base and - // target precisions and factor. - converter struct { - base uint32 // base precision - target uint32 // target precision - - factor *big.Int - } - - // Fixed8Converter is a converter with base precision of Fixed8. It uses - // int64 values because there is a guarantee that balance contract will - // operate with `Deposit` and `Withdraw` amounts that less than 2**53-1. - // This is a JSON bound that uses neo node. Neo-go has int64 limit for - // `smartcontract.Parameter` of integer type. - Fixed8Converter struct { - converter - } -) - -const fixed8Precision = 8 - -// convert is the function that converts `n` to desired precision by using -// factor value. -func convert(n, factor *big.Int, decreasePrecision bool) *big.Int { - if decreasePrecision { - return new(big.Int).Div(n, factor) - } - - return new(big.Int).Mul(n, factor) -} - -// NewConverter returns Fixed8Converter. -func NewConverter(precision uint32) Fixed8Converter { - var c Fixed8Converter - - c.SetBalancePrecision(precision) - - return c -} - -func (c converter) toTarget(n *big.Int) *big.Int { - return convert(n, c.factor, c.base > c.target) -} - -func (c converter) toBase(n *big.Int) *big.Int { - return convert(n, c.factor, c.base < c.target) -} - -// ToFixed8 converts n of balance contract precision to Fixed8 precision. -func (c Fixed8Converter) ToFixed8(n int64) int64 { - return c.toBase(new(big.Int).SetInt64(n)).Int64() -} - -// ToBalancePrecision converts n of Fixed8 precision to balance contract precision. -func (c Fixed8Converter) ToBalancePrecision(n int64) int64 { - return c.toTarget(new(big.Int).SetInt64(n)).Int64() -} - -// SetBalancePrecision prepares converter to work. -func (c *Fixed8Converter) SetBalancePrecision(precision uint32) { - exp := int(precision) - fixed8Precision - if exp < 0 { - exp = -exp - } - - c.base = fixed8Precision - c.target = precision - c.factor = new(big.Int).SetInt64(int64(math.Pow10(exp))) -} - -// Convert is a wrapper of convert function. Use cached `converter` struct -// if fromPrecision and toPrecision are constant. 
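The zaptest-backed constructor above is meant for unit tests: output is attached to the running test and shown only on failure or with -v. A sketch of typical use; the test and package names are hypothetical:

```go
package engine_test

import (
	"context"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
)

func TestComponent(t *testing.T) {
	// Debug-level logger routed through zaptest.
	log := test.NewLogger(t)
	log.Debug(context.Background(), "component started")
}
```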
-func Convert(fromPrecision, toPrecision uint32, n *big.Int) *big.Int { - var decreasePrecision bool - - exp := int(toPrecision) - int(fromPrecision) - if exp < 0 { - decreasePrecision = true - exp = -exp - } - - factor := new(big.Int).SetInt64(int64(math.Pow10(exp))) - - return convert(n, factor, decreasePrecision) -} diff --git a/pkg/util/precision/converter_test.go b/pkg/util/precision/converter_test.go deleted file mode 100644 index 264e90b3c..000000000 --- a/pkg/util/precision/converter_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package precision_test - -import ( - "math/big" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision" - "github.com/stretchr/testify/require" -) - -func TestFixed8Converter_ToBalancePrecision(t *testing.T) { - var n int64 = 100*100_000_000 + 12_345_678 - - t.Run("same precision", func(t *testing.T) { - cnv := precision.NewConverter(8) - res := cnv.ToBalancePrecision(n) - require.Equal(t, n, res) - }) - - t.Run("bigger target", func(t *testing.T) { - cnv := precision.NewConverter(10) - exp := n * 100 - res := cnv.ToBalancePrecision(n) - require.Equal(t, exp, res) - }) - - t.Run("less target", func(t *testing.T) { - cnv := precision.NewConverter(6) - exp := n / 100 - res := cnv.ToBalancePrecision(n) - require.Equal(t, exp, res) - - cnv = precision.NewConverter(0) - exp = n / 100_000_000 - res = cnv.ToBalancePrecision(n) - require.Equal(t, exp, res) - }) -} - -func TestFixed8Converter_ToFixed8(t *testing.T) { - var n int64 = 100*10_000_000 + 12_345_678 - - t.Run("same precision", func(t *testing.T) { - cnv := precision.NewConverter(8) - res := cnv.ToFixed8(n) - require.Equal(t, n, res) - }) - - t.Run("bigger target", func(t *testing.T) { - cnv := precision.NewConverter(10) - exp := n / 100 - res := cnv.ToFixed8(n) - require.Equal(t, exp, res) - }) - - t.Run("less target", func(t *testing.T) { - cnv := precision.NewConverter(6) - exp := n * 100 - res := cnv.ToFixed8(n) - require.Equal(t, exp, res) - - n = 1 - cnv = precision.NewConverter(0) - exp = n * 100_000_000 - res = cnv.ToFixed8(n) - require.Equal(t, exp, res) - }) -} - -func TestConvert(t *testing.T) { - n := big.NewInt(100*10_000_000 + 12_345_678) - - t.Run("same precision", func(t *testing.T) { - require.Equal(t, n, precision.Convert(8, 8, n)) - require.Equal(t, n, precision.Convert(0, 0, n)) - }) - - t.Run("bigger target", func(t *testing.T) { - exp := new(big.Int).Mul(n, big.NewInt(100)) - require.Equal(t, exp, precision.Convert(8, 10, n)) - require.Equal(t, exp, precision.Convert(0, 2, n)) - }) - - t.Run("less target", func(t *testing.T) { - exp := new(big.Int).Div(n, big.NewInt(100)) - require.Equal(t, exp, precision.Convert(10, 8, n)) - require.Equal(t, exp, precision.Convert(2, 0, n)) - }) -} diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go deleted file mode 100644 index a06296a07..000000000 --- a/pkg/util/rand/rand.go +++ /dev/null @@ -1,44 +0,0 @@ -package rand - -import ( - crand "crypto/rand" - "encoding/binary" - mrand "math/rand" -) - -var source = mrand.New(&cryptoSource{}) - -// Uint64 returns a random uint64 value. -func Uint64() uint64 { - return source.Uint64() -} - -// Uint32 returns a random uint32 value. -func Uint32() uint32 { - return source.Uint32() -} - -// Shuffle randomizes the order of elements. -// n is the number of elements. Shuffle panics if n < 0. -// swap swaps the elements with indexes i and j. 
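A worked example of the precision converters above; the 12-decimal balance precision is illustrative, so the cached factor is 10^(12-8) = 10,000:

```go
package main

import (
	"fmt"
	"math/big"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision"
)

func main() {
	cnv := precision.NewConverter(12) // balance contract with 12 decimals

	// Fixed8 -> balance precision multiplies by the factor...
	fmt.Println(cnv.ToBalancePrecision(1_00000000)) // 1000000000000

	// ...and the reverse direction divides by it.
	fmt.Println(cnv.ToFixed8(1_000000000000)) // 100000000

	// One-off conversion without caching the factor.
	fmt.Println(precision.Convert(8, 12, big.NewInt(5))) // 50000
}
```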
-func Shuffle(n int, swap func(i, j int)) { - source.Shuffle(n, swap) -} - -// cryptoSource is math/rand.Source which takes entropy via crypto/rand. -type cryptoSource struct{} - -// Seed implements math/rand.Source. -func (s *cryptoSource) Seed(int64) {} - -// Int63 implements math/rand.Source. -func (s *cryptoSource) Int63() int64 { - return int64(s.Uint64() >> 1) -} - -// Uint64 implements math/rand.Source64. -func (s *cryptoSource) Uint64() uint64 { - var buf [8]byte - _, _ = crand.Read(buf[:]) // always returns nil - return binary.BigEndian.Uint64(buf[:]) -} diff --git a/pkg/util/salt.go b/pkg/util/salt.go deleted file mode 100644 index b97984e70..000000000 --- a/pkg/util/salt.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "io" -) - -// SaltXOR xors bits of data with salt -// repeating salt if necessary. -func SaltXOR(data, salt []byte) []byte { - return SaltXOROffset(data, salt, 0) -} - -// SaltXOROffset xors bits of data with salt starting from off byte -// repeating salt if necessary. -func SaltXOROffset(data, salt []byte, off int) (result []byte) { - result = make([]byte, len(data)) - ls := len(salt) - if ls == 0 { - copy(result, data) - return - } - - for i := range result { - result[i] = data[i] ^ salt[(i+off)%ls] - } - return -} - -type saltWriter struct { - w io.Writer - - off int - - salt []byte -} - -// NewSaltingWriter returns io.Writer instance that applies -// salt to written data and write the result to w. -func NewSaltingWriter(w io.Writer, salt []byte) io.Writer { - if len(salt) == 0 { - return w - } - - return &saltWriter{ - w: w, - salt: salt, - } -} - -func (w *saltWriter) Write(data []byte) (int, error) { - if dataLen := len(data); dataLen > 0 { - data = SaltXOROffset(data, w.salt, w.off) - w.off += dataLen - } - - return w.w.Write(data) -} diff --git a/pkg/util/salt_test.go b/pkg/util/salt_test.go deleted file mode 100644 index c17a2ec86..000000000 --- a/pkg/util/salt_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package util_test - -import ( - "bytes" - "crypto/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "github.com/stretchr/testify/require" -) - -func randData(sz int) []byte { - data := make([]byte, sz) - - _, _ = rand.Read(data) - - return data -} - -func TestSaltWriter_Write(t *testing.T) { - salt := randData(4) - data := randData(15) - buf := bytes.NewBuffer(nil) - - w := util.NewSaltingWriter(buf, salt) - - _, err := w.Write(data) - require.NoError(t, err) - - require.Equal(t, - buf.Bytes(), - util.SaltXOR(data, salt), - ) -} diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go deleted file mode 100644 index bd15d0e8f..000000000 --- a/pkg/util/sdnotify/sdnotify.go +++ /dev/null @@ -1,94 +0,0 @@ -package sdnotify - -import ( - "errors" - "fmt" - "net" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -const ( - ReadyEnabled = "READY=1" - StoppingEnabled = "STOPPING=1" - ReloadingEnabled = "RELOADING=1" -) - -var ( - socket *net.UnixAddr - - errSocketVariableIsNotPresent = errors.New("\"NOTIFY_SOCKET\" environment variable is not present") - errSocketIsNotInitialized = errors.New("socket is not initialized") -) - -// InitSocket initializes socket with provided name of -// environment variable. 
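The salting writer defined in salt.go above keeps a running offset into the repeated salt, so chunked writes produce exactly the same bytes as a single SaltXOR pass. A small sketch of that streaming equivalence:

```go
package main

import (
	"bytes"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)

func main() {
	salt := []byte{0xAA, 0xBB}
	data := []byte("payload to be salted")

	buf := bytes.NewBuffer(nil)
	w := util.NewSaltingWriter(buf, salt)

	// Two writes; the offset carries over between them.
	_, _ = w.Write(data[:7])
	_, _ = w.Write(data[7:])

	fmt.Println(bytes.Equal(buf.Bytes(), util.SaltXOR(data, salt))) // true
}
```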
-func InitSocket() error { - notifySocket := os.Getenv("NOTIFY_SOCKET") - if notifySocket == "" { - return errSocketVariableIsNotPresent - } - socket = &net.UnixAddr{ - Name: notifySocket, - Net: "unixgram", - } - return nil -} - -// FlagAndStatus sends systemd a combination of a -// well-known status and STATUS=%s{status}, separated by newline. -func FlagAndStatus(status string) error { - if status == ReloadingEnabled { - // From https://www.man7.org/linux/man-pages/man5/systemd.service.5.html - // - // When initiating the reload process the service is - // expected to reply with a notification message via - // sd_notify(3) that contains the "RELOADING=1" field in - // combination with "MONOTONIC_USEC=" set to the current - // monotonic time (i.e. CLOCK_MONOTONIC in - // clock_gettime(2)) in μs, formatted as decimal string. - // Once reloading is complete another notification message - // must be sent, containing "READY=1". - // - // For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html - var ts unix.Timespec - if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil { - return fmt.Errorf("clock_gettime: %w", err) - } - status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10) - status += "\nSTATUS=RELOADING" - return Send(status) - } - status += "\nSTATUS=" + strings.TrimSuffix(status, "=1") - return Send(status) -} - -// Status sends systemd notify STATUS=%s{status}. -func Status(status string) error { - return Send("STATUS=" + status) -} - -// ClearStatus resets the current service status previously set by Status. -func ClearStatus() error { - return Status("") -} - -// Send state through the notify socket if any. -// If the notify socket was not detected, it returns an error. -func Send(state string) error { - if socket == nil { - return errSocketIsNotInitialized - } - conn, err := net.DialUnix(socket.Net, nil, socket) - if err != nil { - return fmt.Errorf("can't open unix socket: %v", err) - } - defer conn.Close() - if _, err = conn.Write([]byte(state)); err != nil { - return fmt.Errorf("can't write into the unix socket: %v", err) - } - return nil -} diff --git a/pkg/util/state/storage.go b/pkg/util/state/storage.go deleted file mode 100644 index ee957f270..000000000 --- a/pkg/util/state/storage.go +++ /dev/null @@ -1,71 +0,0 @@ -package state - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - - "go.etcd.io/bbolt" -) - -// PersistentStorage is a wrapper around persistent K:V db that -// provides thread safe functions to set and fetch state variables -// of the Inner Ring and Storage applications. -type PersistentStorage struct { - db *bbolt.DB -} - -var stateBucket = []byte("state") - -// NewPersistentStorage creates a new instance of a storage with 0600 rights. -func NewPersistentStorage(path string) (*PersistentStorage, error) { - db, err := bbolt.Open(path, 0o600, nil) - if err != nil { - return nil, fmt.Errorf("can't open bbolt at %s: %w", path, err) - } - - return &PersistentStorage{db: db}, nil -} - -// SetUInt32 sets a uint32 value in the storage. -func (p PersistentStorage) SetUInt32(key []byte, value uint32) error { - return p.db.Update(func(tx *bbolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(stateBucket) - if err != nil { - return fmt.Errorf("can't create state bucket in state persistent storage: %w", err) - } - - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, uint64(value)) - - return b.Put(key, buf) - }) -} - -// UInt32 returns a uint32 value from persistent storage. 
If the value does not exist, -// returns 0. -func (p PersistentStorage) UInt32(key []byte) (n uint32, err error) { - err = p.db.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(stateBucket) - if b == nil { - return nil // if bucket not exists yet, return default n = 0 - } - - buf := b.Get(key) - if len(buf) != 8 { - return fmt.Errorf("persistent storage does not store uint data in %s", hex.EncodeToString(key)) - } - - u64 := binary.LittleEndian.Uint64(buf) - n = uint32(u64) - - return nil - }) - - return -} - -// Close closes persistent database instance. -func (p PersistentStorage) Close() error { - return p.db.Close() -} diff --git a/pkg/util/state/storage_test.go b/pkg/util/state/storage_test.go deleted file mode 100644 index 031fa883b..000000000 --- a/pkg/util/state/storage_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package state_test - -import ( - "path/filepath" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" - "github.com/stretchr/testify/require" -) - -func TestPersistentStorage_UInt32(t *testing.T) { - storage, err := state.NewPersistentStorage(filepath.Join(t.TempDir(), ".storage")) - require.NoError(t, err) - defer storage.Close() - - n, err := storage.UInt32([]byte("unset-value")) - require.NoError(t, err) - require.EqualValues(t, 0, n) - - err = storage.SetUInt32([]byte("foo"), 10) - require.NoError(t, err) - - n, err = storage.UInt32([]byte("foo")) - require.NoError(t, err) - require.EqualValues(t, 10, n) -} diff --git a/pkg/util/sync/key_locker.go b/pkg/util/sync/key_locker.go deleted file mode 100644 index 97de0386d..000000000 --- a/pkg/util/sync/key_locker.go +++ /dev/null @@ -1,56 +0,0 @@ -package sync - -import "sync" - -type locker struct { - mtx sync.Mutex - waiters int // not protected by mtx, must used outer mutex to update concurrently -} - -type KeyLocker[K comparable] struct { - lockers map[K]*locker - lockersMtx sync.Mutex -} - -func NewKeyLocker[K comparable]() *KeyLocker[K] { - return &KeyLocker[K]{ - lockers: make(map[K]*locker), - } -} - -func (l *KeyLocker[K]) Lock(key K) { - l.lockersMtx.Lock() - - if locker, found := l.lockers[key]; found { - locker.waiters++ - l.lockersMtx.Unlock() - - locker.mtx.Lock() - return - } - - locker := &locker{ - waiters: 1, - } - locker.mtx.Lock() - - l.lockers[key] = locker - l.lockersMtx.Unlock() -} - -func (l *KeyLocker[K]) Unlock(key K) { - l.lockersMtx.Lock() - defer l.lockersMtx.Unlock() - - locker, found := l.lockers[key] - if !found { - return - } - - if locker.waiters == 1 { - delete(l.lockers, key) - } - locker.waiters-- - - locker.mtx.Unlock() -} diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go deleted file mode 100644 index 259064ecf..000000000 --- a/pkg/util/sync/key_locker_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package sync - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" -) - -func TestKeyLocker(t *testing.T) { - taken := false - eg, _ := errgroup.WithContext(context.Background()) - keyLocker := NewKeyLocker[int]() - for range 100 { - eg.Go(func() error { - keyLocker.Lock(0) - defer keyLocker.Unlock(0) - - require.False(t, taken) - taken = true - require.True(t, taken) - time.Sleep(10 * time.Millisecond) - taken = false - require.False(t, taken) - - return nil - }) - } - require.NoError(t, eg.Wait()) -} diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go deleted file mode 100644 index 7373e538f..000000000 --- a/pkg/util/testing/netmap_source.go +++ 
/dev/null @@ -1,36 +0,0 @@ -package testing - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -var ( - errInvalidDiff = errors.New("invalid diff") - errNetmapNotFound = errors.New("netmap not found") -) - -type TestNetmapSource struct { - Netmaps map[uint64]*netmap.NetMap - CurrentEpoch uint64 -} - -func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.CurrentEpoch { - return nil, errInvalidDiff - } - return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) -} - -func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.Netmaps[epoch]; found { - return nm, nil - } - return nil, errNetmapNotFound -} - -func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { - return s.CurrentEpoch, nil -} diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go deleted file mode 100644 index 97d76c492..000000000 --- a/pkg/util/worker_pool.go +++ /dev/null @@ -1,55 +0,0 @@ -package util - -import ( - "sync/atomic" - - "github.com/panjf2000/ants/v2" -) - -// WorkerPool represents a tool to control -// the execution of go-routine pool. -type WorkerPool interface { - // Submit queues a function for execution - // in a separate routine. - // - // Implementation must return any error encountered - // that prevented the function from being queued. - Submit(func()) error - - // Release releases worker pool resources. All `Submit` calls will - // finish with ErrPoolClosed. It doesn't wait until all submitted - // functions have returned so synchronization must be achieved - // via other means (e.g. sync.WaitGroup). - Release() -} - -// pseudoWorkerPool represents a pseudo worker pool which executes the submitted job immediately in the caller's routine. -type pseudoWorkerPool struct { - closed atomic.Bool -} - -// ErrPoolClosed is returned when submitting task to a closed pool. -var ErrPoolClosed = ants.ErrPoolClosed - -// NewPseudoWorkerPool returns a new instance of a synchronous worker pool. -func NewPseudoWorkerPool() WorkerPool { - return &pseudoWorkerPool{} -} - -// Submit executes the passed function immediately. -// -// Always returns nil. -func (p *pseudoWorkerPool) Submit(fn func()) error { - if p.closed.Load() { - return ErrPoolClosed - } - - fn() - - return nil -} - -// Release implements the WorkerPool interface. -func (p *pseudoWorkerPool) Release() { - p.closed.Store(true) -} diff --git a/pkg/util/worker_pool_test.go b/pkg/util/worker_pool_test.go deleted file mode 100644 index 7de635d80..000000000 --- a/pkg/util/worker_pool_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package util - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSyncWorkerPool(t *testing.T) { - t.Run("submit to released pool", func(t *testing.T) { - p := NewPseudoWorkerPool() - p.Release() - require.Equal(t, ErrPoolClosed, p.Submit(func() {})) - }) - t.Run("create and wait", func(t *testing.T) { - p := NewPseudoWorkerPool() - ch1, ch2 := make(chan struct{}), make(chan struct{}) - wg := new(sync.WaitGroup) - wg.Add(2) - go func(t *testing.T) { - defer wg.Done() - err := p.Submit(newControlledReturnFunc(ch1)) - require.NoError(t, err) - }(t) - go func(t *testing.T) { - defer wg.Done() - err := p.Submit(newControlledReturnFunc(ch2)) - require.NoError(t, err) - }(t) - - // Make sure functions were submitted. 
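The WorkerPool contract above is easiest to see in isolation: callers program against the interface, and the pseudo pool simply runs each task inline in the submitter's goroutine. A sketch of consuming it:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)

func runTasks(pool util.WorkerPool, n int) error {
	for i := range n {
		// With the pseudo pool this executes synchronously; an
		// ants-backed pool would run it on a worker goroutine.
		if err := pool.Submit(func() { fmt.Println("task", i) }); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	pool := util.NewPseudoWorkerPool()
	_ = runTasks(pool, 3)

	pool.Release()
	fmt.Println(pool.Submit(func() {}) == util.ErrPoolClosed) // true
}
```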
-		<-ch1
-		<-ch2
-		p.Release()
-		require.Equal(t, ErrPoolClosed, p.Submit(func() {}))
-
-		close(ch1)
-		close(ch2)
-		wg.Wait()
-	})
-}
-
-// newControlledReturnFunc returns a function which signals into ch after
-// it has started and waits for a value on the channel before returning.
-// ch must be unbuffered.
-func newControlledReturnFunc(ch chan struct{}) func() {
-	return func() {
-		ch <- struct{}{}
-		<-ch
-	}
-}
diff --git a/scripts/export-metrics/main.go b/scripts/export-metrics/main.go
deleted file mode 100644
index 51705ee49..000000000
--- a/scripts/export-metrics/main.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"flag"
-	"fmt"
-	"os"
-
-	local_metrics "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
-)
-
-var (
-	node = flag.String("node", "", "File to export storage node metrics to.")
-	ir   = flag.String("ir", "", "File to export innerring node metrics to.")
-)
-
-func main() {
-	flag.Parse()
-
-	if *node != "" && *ir != "" {
-		fmt.Println("-node and -ir flags are mutually exclusive")
-		os.Exit(1)
-	}
-
-	var filename string
-	switch {
-	case *node != "":
-		_ = local_metrics.NewNodeMetrics()
-		filename = *node
-	case *ir != "":
-		_ = local_metrics.NewInnerRingMetrics()
-		filename = *ir
-
-	default:
-		flag.Usage()
-		os.Exit(1)
-	}
-
-	ds := metrics.DescribeAll()
-
-	data, err := json.Marshal(ds)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Could not marshal the metric descriptions: %v\n", err)
-		os.Exit(1)
-	}
-
-	if err := os.WriteFile(filename, data, 0o644); err != nil {
-		fmt.Fprintf(os.Stderr, "Could not write to file: %v\n", err)
-		os.Exit(1)
-	}
-}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
deleted file mode 100644
index 39a420358..000000000
--- a/scripts/populate-metabase/internal/generate.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package internal
-
-import (
-	cryptorand "crypto/rand"
-	"crypto/sha256"
-	"fmt"
-	"math/rand"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
-	"git.frostfs.info/TrueCloudLab/tzhash/tz"
-)
-
-func GeneratePayloadPool(count uint, size uint) [][]byte {
-	var pool [][]byte
-	for range count {
-		payload := make([]byte, size)
-		_, _ = cryptorand.Read(payload)
-
-		pool = append(pool, payload)
-	}
-	return pool
-}
-
-func GenerateAttributePool(count uint) []objectSDK.Attribute {
-	var pool []objectSDK.Attribute
-	for i := range count {
-		for j := range count {
-			attr := *objectSDK.NewAttribute()
-			attr.SetKey(fmt.Sprintf("key%d", i))
-			attr.SetValue(fmt.Sprintf("value%d", j))
-			pool = append(pool, attr)
-		}
-	}
-	return pool
-}
-
-func GenerateOwnerPool(count uint) []user.ID {
-	var pool []user.ID
-	for range count {
-		pool = append(pool, usertest.ID())
-	}
-	return pool
-}
-
-type ObjectOption func(obj *objectSDK.Object)
-
-func GenerateObject(options ...ObjectOption) *objectSDK.Object {
-	var ver version.Version
-	ver.SetMajor(2)
-	ver.SetMinor(1)
-
-	payload := make([]byte, 0)
-
-	var
csum checksum.Checksum - csum.SetSHA256(sha256.Sum256(payload)) - - var csumTZ checksum.Checksum - csumTZ.SetTillichZemor(tz.Sum(csum.Value())) - - obj := objectSDK.New() - obj.SetID(oidtest.ID()) - obj.SetOwnerID(usertest.ID()) - obj.SetContainerID(cidtest.ID()) - - header := objecttest.Object().GetECHeader() - header.SetParent(oidtest.ID()) - obj.SetECHeader(header) - - obj.SetVersion(&ver) - obj.SetPayload(payload) - obj.SetPayloadSize(uint64(len(payload))) - obj.SetPayloadChecksum(csum) - obj.SetPayloadHomomorphicHash(csumTZ) - - for _, option := range options { - option(obj) - } - - return obj -} - -func WithContainerID(cid cid.ID) ObjectOption { - return func(obj *objectSDK.Object) { - obj.SetContainerID(cid) - } -} - -func WithType(typ objectSDK.Type) ObjectOption { - return func(obj *objectSDK.Object) { - obj.SetType(typ) - } -} - -func WithPayloadFromPool(pool [][]byte) ObjectOption { - payload := pool[rand.Intn(len(pool))] - - var csum checksum.Checksum - csum.SetSHA256(sha256.Sum256(payload)) - - var csumTZ checksum.Checksum - csumTZ.SetTillichZemor(tz.Sum(csum.Value())) - - return func(obj *objectSDK.Object) { - obj.SetPayload(payload) - obj.SetPayloadSize(uint64(len(payload))) - obj.SetPayloadChecksum(csum) - obj.SetPayloadHomomorphicHash(csumTZ) - } -} - -func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption { - return func(obj *objectSDK.Object) { - var attrs []objectSDK.Attribute - for range count { - attrs = append(attrs, pool[rand.Intn(len(pool))]) - } - obj.SetAttributes(attrs...) - } -} - -func WithOwnerIDFromPool(pool []user.ID) ObjectOption { - return func(obj *objectSDK.Object) { - obj.SetOwnerID(pool[rand.Intn(len(pool))]) - } -} diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go deleted file mode 100644 index fafe61eaa..000000000 --- a/scripts/populate-metabase/internal/populate.go +++ /dev/null @@ -1,260 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "math/rand" - "sync" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "golang.org/x/sync/errgroup" -) - -type EpochState struct{} - -func (s EpochState) CurrentEpoch() uint64 { - return 0 -} - -func PopulateWithObjects( - ctx context.Context, - db *meta.DB, - group *errgroup.Group, - count uint, - factory func() *objectSDK.Object, -) { - digits := "0123456789" - - for range count { - obj := factory() - id := fmt.Appendf(nil, "%c/%c/%c", - digits[rand.Int()%len(digits)], - digits[rand.Int()%len(digits)], - digits[rand.Int()%len(digits)]) - - prm := meta.PutPrm{} - prm.SetObject(obj) - prm.SetStorageID(id) - - group.Go(func() error { - if _, err := db.Put(ctx, prm); err != nil { - return fmt.Errorf("couldn't put an object: %w", err) - } - return nil - }) - } -} - -func PopulateWithBigObjects( - ctx context.Context, - db *meta.DB, - group *errgroup.Group, - count uint, - factory func() *objectSDK.Object, -) { - for range count { - group.Go(func() error { - if err := populateWithBigObject(ctx, db, factory); err != nil { - return fmt.Errorf("couldn't put a big object: %w", err) - } - return nil - }) - } -} - -func populateWithBigObject( - ctx context.Context, - db *meta.DB, - factory func() *objectSDK.Object, -) error { - t := &target{db: db} - - 
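The generators above combine into object factories through functional options. A minimal sketch of composing them (runnable only inside this repository's module, since it imports the script's internal package); the pool sizes are illustrative:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

func main() {
	// Shared pools keep the generated set realistic:
	// many objects, few distinct payloads, owners and attributes.
	payloads := internal.GeneratePayloadPool(4, 32)
	owners := internal.GenerateOwnerPool(3)
	attrs := internal.GenerateAttributePool(2)

	obj := internal.GenerateObject(
		internal.WithContainerID(cidtest.ID()),
		internal.WithType(objectSDK.TypeRegular),
		internal.WithPayloadFromPool(payloads),
		internal.WithOwnerIDFromPool(owners),
		internal.WithAttributesFromPool(attrs, 2),
	)

	fmt.Println(obj.PayloadSize())
}
```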
pk, _ := keys.NewPrivateKey()
-	p := transformer.NewPayloadSizeLimiter(transformer.Params{
-		Key:            &pk.PrivateKey,
-		NextTargetInit: func() transformer.ObjectWriter { return t },
-		NetworkState:   EpochState{},
-		MaxSize:        10,
-	})
-
-	obj := factory()
-	payload := make([]byte, 30)
-
-	err := p.WriteHeader(ctx, obj)
-	if err != nil {
-		return err
-	}
-
-	_, err = p.Write(ctx, payload)
-	if err != nil {
-		return err
-	}
-
-	_, err = p.Close(ctx)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type target struct {
-	db *meta.DB
-}
-
-func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
-	prm := meta.PutPrm{}
-	prm.SetObject(obj)
-
-	_, err := t.db.Put(ctx, prm)
-	return err
-}
-
-func PopulateGraveyard(
-	ctx context.Context,
-	db *meta.DB,
-	group *errgroup.Group,
-	workBufferSize int,
-	count uint,
-	factory func() *objectSDK.Object,
-) {
-	ts := factory()
-	ts.SetType(objectSDK.TypeTombstone)
-
-	prm := meta.PutPrm{}
-	prm.SetObject(ts)
-
-	group.Go(func() error {
-		if _, err := db.Put(ctx, prm); err != nil {
-			return fmt.Errorf("couldn't put a tombstone object: %w", err)
-		}
-		return nil
-	})
-
-	cID, _ := ts.ContainerID()
-	oID, _ := ts.ID()
-
-	var tsAddr oid.Address
-
-	tsAddr.SetContainer(cID)
-	tsAddr.SetObject(oID)
-
-	addrs := make(chan oid.Address, workBufferSize)
-
-	go func() {
-		defer close(addrs)
-
-		wg := &sync.WaitGroup{}
-		wg.Add(int(count))
-
-		for range count {
-			obj := factory()
-
-			prm := meta.PutPrm{}
-			prm.SetObject(obj)
-
-			group.Go(func() error {
-				defer wg.Done()
-
-				if _, err := db.Put(ctx, prm); err != nil {
-					return fmt.Errorf("couldn't put an object: %w", err)
-				}
-
-				cID, _ := obj.ContainerID()
-				oID, _ := obj.ID()
-
-				var addr oid.Address
-				addr.SetContainer(cID)
-				addr.SetObject(oID)
-
-				addrs <- addr
-				return nil
-			})
-		}
-		wg.Wait()
-	}()
-
-	go func() {
-		for addr := range addrs {
-			prm := meta.InhumePrm{}
-			prm.SetAddresses(addr)
-			prm.SetTombstoneAddress(tsAddr)
-
-			group.Go(func() error {
-				if _, err := db.Inhume(ctx, prm); err != nil {
-					return fmt.Errorf("couldn't inhume an object: %w", err)
-				}
-				return nil
-			})
-		}
-	}()
-}
-
-func PopulateLocked(
-	ctx context.Context,
-	db *meta.DB,
-	group *errgroup.Group,
-	workBufferSize int,
-	count uint,
-	factory func() *objectSDK.Object,
-) {
-	locker := factory()
-	locker.SetType(objectSDK.TypeLock)
-
-	prm := meta.PutPrm{}
-	prm.SetObject(locker)
-
-	group.Go(func() error {
-		if _, err := db.Put(ctx, prm); err != nil {
-			return fmt.Errorf("couldn't put a locker object: %w", err)
-		}
-		return nil
-	})
-
-	ids := make(chan oid.ID, workBufferSize)
-
-	go func() {
-		defer close(ids)
-
-		wg := &sync.WaitGroup{}
-		wg.Add(int(count))
-
-		for range count {
-			obj := factory()
-
-			prm := meta.PutPrm{}
-			prm.SetObject(obj)
-
-			group.Go(func() error {
-				// Done must run when the task finishes, not when this
-				// goroutine returns: deferring it in the loop body would
-				// postpone every Done past wg.Wait and deadlock.
-				defer wg.Done()
-
-				if _, err := db.Put(ctx, prm); err != nil {
-					return fmt.Errorf("couldn't put an object: %w", err)
-				}
-
-				id, _ := obj.ID()
-				ids <- id
-				return nil
-			})
-		}
-		wg.Wait()
-	}()
-
-	go func() {
-		for id := range ids {
-			lockerCID, _ := locker.ContainerID()
-			lockerOID, _ := locker.ID()
-
-			group.Go(func() error {
-				if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
-					return fmt.Errorf("couldn't lock an object: %w", err)
-				}
-				return nil
-			})
-		}
-	}()
-}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
deleted file mode 100644
index 8c4ea41ad..000000000
--- a/scripts/populate-metabase/main.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package main
-
-import (
-	"context"
- "errors" - "flag" - "fmt" - "os" - - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "golang.org/x/sync/errgroup" -) - -var ( - path string - force bool - jobs uint - - numContainers, - numObjects, - numAttributesPerObj, - numOwners, - numPayloads, - numAttributes uint -) - -func main() { - flag.StringVar(&path, "path", "", "Path to metabase") - flag.BoolVar(&force, "force", false, "Rewrite existing database") - flag.UintVar(&jobs, "j", 10000, "Number of jobs to run") - - flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created") - flag.UintVar(&numObjects, "objects", 0, "Number of objects per container") - flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object") - - flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used") - flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used") - flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used") - - flag.Parse() - - exitIf(numPayloads == 0, "must have payloads\n") - exitIf(numAttributes == 0, "must have attributes\n") - exitIf(numOwners == 0, "must have owners\n") - exitIf(len(path) == 0, "path to metabase not specified\n") - exitIf( - numAttributesPerObj > numAttributes, - "object can't have more attributes than available\n", - ) - - info, err := os.Stat(path) - exitIf( - err != nil && !errors.Is(err, os.ErrNotExist), - "couldn't get path info: %s\n", err, - ) - - // Path exits. - if err == nil { - exitIf(info.IsDir(), "path is a directory\n") - exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n") - - err = os.Remove(path) - exitIf(err != nil, "couldn't remove existing file: %s\n", err) - } - - err = populate() - exitIf(err != nil, "couldn't populate the metabase: %s\n", err) -} - -func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object { - return func() *objectSDK.Object { - return internal.GenerateObject(opts...) - } -} - -func populate() (err error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - db := meta.New([]meta.Option{ - meta.WithPath(path), - meta.WithPermissions(0o600), - meta.WithEpochState(internal.EpochState{}), - }...) 
-func populate() (err error) {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    db := meta.New([]meta.Option{
-        meta.WithPath(path),
-        meta.WithPermissions(0o600),
-        meta.WithEpochState(internal.EpochState{}),
-    }...)
-
-    if err = db.Open(ctx, mode.ReadWrite); err != nil {
-        return fmt.Errorf("couldn't open the metabase: %w", err)
-    }
-    defer func() {
-        if errOnClose := db.Close(ctx); errOnClose != nil {
-            err = errors.Join(
-                err,
-                fmt.Errorf("couldn't close the metabase: %w", errOnClose),
-            )
-        }
-    }()
-
-    if err = db.Init(ctx); err != nil {
-        return fmt.Errorf("couldn't init the metabase: %w", err)
-    }
-
-    payloads := internal.GeneratePayloadPool(numPayloads, 32)
-    attributes := internal.GenerateAttributePool(numAttributes)
-    owners := internal.GenerateOwnerPool(numOwners)
-
-    types := []objectSDK.Type{
-        objectSDK.TypeRegular,
-        objectSDK.TypeLock,
-        objectSDK.TypeTombstone,
-    }
-
-    eg, ctx := errgroup.WithContext(ctx)
-    eg.SetLimit(int(jobs))
-
-    for range numContainers {
-        cid := cidtest.ID()
-
-        for _, typ := range types {
-            internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
-                internal.WithContainerID(cid),
-                internal.WithType(typ),
-                internal.WithPayloadFromPool(payloads),
-                internal.WithOwnerIDFromPool(owners),
-                internal.WithAttributesFromPool(attributes, numAttributesPerObj),
-            ))
-        }
-        internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
-            internal.WithContainerID(cid),
-            internal.WithType(objectSDK.TypeRegular),
-            internal.WithAttributesFromPool(attributes, numAttributesPerObj),
-            internal.WithOwnerIDFromPool(owners),
-        ))
-        internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
-            internal.WithContainerID(cid),
-            internal.WithType(objectSDK.TypeRegular),
-            internal.WithAttributesFromPool(attributes, numAttributesPerObj),
-            internal.WithOwnerIDFromPool(owners),
-        ))
-        internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
-            internal.WithContainerID(cid),
-            internal.WithType(objectSDK.TypeRegular),
-            internal.WithAttributesFromPool(attributes, numAttributesPerObj),
-            internal.WithOwnerIDFromPool(owners),
-        ))
-    }
-
-    return eg.Wait()
-}
-
-func exitIf(cond bool, format string, args ...any) {
-    if cond {
-        fmt.Fprintf(os.Stderr, format, args...)
-        os.Exit(1)
-    }
-}
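With the flag defaults above, only -path and the population counts are strictly required; an invocation would look roughly like `go run ./scripts/populate-metabase -path /tmp/meta.db -force -containers 10 -objects 100 -attributes 3` (values illustrative; -attributes must not exceed -distinct-attributes).

populate also shows why it declares a named err return: the deferred function can inspect and replace that value after the body returns, so a failing Close is joined into the result via errors.Join instead of being dropped. A file-based stand-in for the same shape (work and its argument are hypothetical):

package main

import (
    "errors"
    "fmt"
    "os"
)

// work mirrors populate's shutdown handling: the named return lets the
// deferred func merge a Close failure into whatever err already holds.
func work(path string) (err error) {
    f, err := os.Open(path)
    if err != nil {
        return fmt.Errorf("open: %w", err)
    }
    defer func() {
        if errOnClose := f.Close(); errOnClose != nil {
            err = errors.Join(err, fmt.Errorf("close: %w", errOnClose))
        }
    }()

    // ... the actual work with f would go here ...
    return nil
}

func main() {
    if err := work(os.Args[0]); err != nil { // reads the binary itself; illustrative
        fmt.Println(err)
    }
}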